Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acconfig.h | 4
-rw-r--r-- include/acpi/acexcep.h | 12
-rw-r--r-- include/acpi/acpi_bus.h | 3
-rw-r--r-- include/acpi/acpixf.h | 6
-rw-r--r-- include/acpi/actbl1.h | 159
-rw-r--r-- include/acpi/actbl2.h | 15
-rw-r--r-- include/acpi/actypes.h | 4
-rw-r--r-- include/asm-generic/dma-mapping.h | 10
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 4
-rw-r--r-- include/crypto/aead.h | 10
-rw-r--r-- include/crypto/chacha20.h | 3
-rw-r--r-- include/crypto/hash.h | 46
-rw-r--r-- include/crypto/if_alg.h | 2
-rw-r--r-- include/crypto/internal/hash.h | 2
-rw-r--r-- include/crypto/internal/scompress.h | 11
-rw-r--r-- include/crypto/null.h | 10
-rw-r--r-- include/crypto/poly1305.h | 2
-rw-r--r-- include/crypto/salsa20.h | 27
-rw-r--r-- include/crypto/sha3.h | 6
-rw-r--r-- include/crypto/skcipher.h | 11
-rw-r--r-- include/drm/drm_file.h | 2
-rw-r--r-- include/dt-bindings/gpio/gpio.h | 6
-rw-r--r-- include/linux/acpi.h | 18
-rw-r--r-- include/linux/arch_topology.h | 2
-rw-r--r-- include/linux/arm_sdei.h | 79
-rw-r--r-- include/linux/ata.h | 2
-rw-r--r-- include/linux/backing-dev.h | 2
-rw-r--r-- include/linux/bio.h | 24
-rw-r--r-- include/linux/bitfield.h | 46
-rw-r--r-- include/linux/blk-cgroup.h | 8
-rw-r--r-- include/linux/blk-mq.h | 3
-rw-r--r-- include/linux/blk_types.h | 28
-rw-r--r-- include/linux/blkdev.h | 172
-rw-r--r-- include/linux/buffer_head.h | 1
-rw-r--r-- include/linux/bvec.h | 9
-rw-r--r-- include/linux/cgroup-defs.h | 2
-rw-r--r-- include/linux/compat.h | 100
-rw-r--r-- include/linux/cper.h | 48
-rw-r--r-- include/linux/cpu_cooling.h | 75
-rw-r--r-- include/linux/cpuhotplug.h | 1
-rw-r--r-- include/linux/cpuidle.h | 40
-rw-r--r-- include/linux/crc-ccitt.h | 7
-rw-r--r-- include/linux/crypto.h | 10
-rw-r--r-- include/linux/device-mapper.h | 56
-rw-r--r-- include/linux/device.h | 3
-rw-r--r-- include/linux/dma-buf.h | 2
-rw-r--r-- include/linux/dma-direct.h | 47
-rw-r--r-- include/linux/dma-mapping.h | 23
-rw-r--r-- include/linux/efi.h | 46
-rw-r--r-- include/linux/elevator.h | 2
-rw-r--r-- include/linux/eventfd.h | 14
-rw-r--r-- include/linux/f2fs_fs.h | 14
-rw-r--r-- include/linux/fs.h | 34
-rw-r--r-- include/linux/ftrace.h | 12
-rw-r--r-- include/linux/fwnode.h | 4
-rw-r--r-- include/linux/genetlink.h | 3
-rw-r--r-- include/linux/genhd.h | 5
-rw-r--r-- include/linux/gpio.h | 10
-rw-r--r-- include/linux/gpio/consumer.h | 25
-rw-r--r-- include/linux/gpio/driver.h | 3
-rw-r--r-- include/linux/gpio/machine.h | 4
-rw-r--r-- include/linux/hid.h | 22
-rw-r--r-- include/linux/hrtimer.h | 113
-rw-r--r-- include/linux/iio/adc/stm32-dfsdm-adc.h | 18
-rw-r--r-- include/linux/iio/consumer.h | 37
-rw-r--r-- include/linux/iio/hw-consumer.h | 21
-rw-r--r-- include/linux/iio/iio.h | 28
-rw-r--r-- include/linux/iio/types.h | 28
-rw-r--r-- include/linux/init_task.h | 258
-rw-r--r-- include/linux/integrity.h | 1
-rw-r--r-- include/linux/irq_work.h | 11
-rw-r--r-- include/linux/irqflags.h | 2
-rw-r--r-- include/linux/iversion.h | 337
-rw-r--r-- include/linux/jump_label.h | 7
-rw-r--r-- include/linux/lightnvm.h | 125
-rw-r--r-- include/linux/livepatch.h | 4
-rw-r--r-- include/linux/lockd/lockd.h | 9
-rw-r--r-- include/linux/lockdep.h | 7
-rw-r--r-- include/linux/mfd/axp20x.h | 5
-rw-r--r-- include/linux/mfd/cros_ec.h | 4
-rw-r--r-- include/linux/mfd/cros_ec_commands.h | 17
-rw-r--r-- include/linux/mfd/palmas.h | 3
-rw-r--r-- include/linux/mfd/rave-sp.h | 60
-rw-r--r-- include/linux/mfd/stm32-lptimer.h | 6
-rw-r--r-- include/linux/mfd/stm32-timers.h | 4
-rw-r--r-- include/linux/mfd/tmio.h | 20
-rw-r--r-- include/linux/mlx5/device.h | 16
-rw-r--r-- include/linux/mlx5/driver.h | 43
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 27
-rw-r--r-- include/linux/mlx5/qp.h | 12
-rw-r--r-- include/linux/mlx5/vport.h | 4
-rw-r--r-- include/linux/mm.h | 4
-rw-r--r-- include/linux/mmc/host.h | 5
-rw-r--r-- include/linux/mmc/slot-gpio.h | 1
-rw-r--r-- include/linux/module.h | 9
-rw-r--r-- include/linux/mtd/map.h | 130
-rw-r--r-- include/linux/mtd/mtd.h | 28
-rw-r--r-- include/linux/mtd/rawnand.h | 443
-rw-r--r-- include/linux/mtd/spi-nor.h | 12
-rw-r--r-- include/linux/net.h | 2
-rw-r--r-- include/linux/netfilter/nfnetlink.h | 3
-rw-r--r-- include/linux/nfs4.h | 12
-rw-r--r-- include/linux/nubus.h | 189
-rw-r--r-- include/linux/nvme.h | 22
-rw-r--r-- include/linux/of.h | 7
-rw-r--r-- include/linux/of_gpio.h | 2
-rw-r--r-- include/linux/omap-gpmc.h | 28
-rw-r--r-- include/linux/percpu-refcount.h | 6
-rw-r--r-- include/linux/percpu_counter.h | 6
-rw-r--r-- include/linux/pinctrl/pinconf-generic.h | 2
-rw-r--r-- include/linux/platform_data/mtd-onenand-omap2.h | 34
-rw-r--r-- include/linux/platform_data/spi-s3c64xx.h | 6
-rw-r--r-- include/linux/pm.h | 16
-rw-r--r-- include/linux/pm_wakeup.h | 7
-rw-r--r-- include/linux/poll.h | 10
-rw-r--r-- include/linux/posix-clock.h | 2
-rw-r--r-- include/linux/posix-timers.h | 25
-rw-r--r-- include/linux/posix_acl.h | 7
-rw-r--r-- include/linux/power/bq27xxx_battery.h | 1
-rw-r--r-- include/linux/property.h | 2
-rw-r--r-- include/linux/psci.h | 4
-rw-r--r-- include/linux/rcupdate.h | 25
-rw-r--r-- include/linux/rcutiny.h | 1
-rw-r--r-- include/linux/rcutree.h | 1
-rw-r--r-- include/linux/regmap.h | 50
-rw-r--r-- include/linux/regulator/driver.h | 2
-rw-r--r-- include/linux/regulator/machine.h | 37
-rw-r--r-- include/linux/ring_buffer.h | 2
-rw-r--r-- include/linux/rpmsg.h | 4
-rw-r--r-- include/linux/rtnetlink.h | 3
-rw-r--r-- include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h) | 0
-rw-r--r-- include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h) | 236
-rw-r--r-- include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h) | 0
-rw-r--r-- include/linux/scatterlist.h | 11
-rw-r--r-- include/linux/sched.h | 14
-rw-r--r-- include/linux/sched/cpufreq.h | 2
-rw-r--r-- include/linux/sched/signal.h | 28
-rw-r--r-- include/linux/sched/task_stack.h | 2
-rw-r--r-- include/linux/sched/topology.h | 12
-rw-r--r-- include/linux/scif.h | 4
-rw-r--r-- include/linux/seccomp.h | 8
-rw-r--r-- include/linux/seqlock.h | 3
-rw-r--r-- include/linux/serdev.h | 1
-rw-r--r-- include/linux/signal.h | 15
-rw-r--r-- include/linux/skbuff.h | 2
-rw-r--r-- include/linux/sound.h | 2
-rw-r--r-- include/linux/srcu.h | 4
-rw-r--r-- include/linux/srcutree.h | 8
-rw-r--r-- include/linux/string.h | 3
-rw-r--r-- include/linux/sunrpc/clnt.h | 1
-rw-r--r-- include/linux/sunrpc/xprtrdma.h | 2
-rw-r--r-- include/linux/suspend.h | 28
-rw-r--r-- include/linux/swiotlb.h | 12
-rw-r--r-- include/linux/torture.h | 8
-rw-r--r-- include/linux/tpm.h | 39
-rw-r--r-- include/linux/tpm_eventlog.h | 124
-rw-r--r-- include/linux/tracepoint.h | 5
-rw-r--r-- include/linux/tty_ldisc.h | 2
-rw-r--r-- include/linux/w1-gpio.h | 9
-rw-r--r-- include/linux/wait.h | 10
-rw-r--r-- include/media/lirc_dev.h | 2
-rw-r--r-- include/media/media-devnode.h | 2
-rw-r--r-- include/media/soc_camera.h | 2
-rw-r--r-- include/media/v4l2-ctrls.h | 2
-rw-r--r-- include/media/v4l2-dev.h | 2
-rw-r--r-- include/media/v4l2-mem2mem.h | 4
-rw-r--r-- include/media/videobuf-core.h | 2
-rw-r--r-- include/media/videobuf2-core.h | 2
-rw-r--r-- include/media/videobuf2-v4l2.h | 5
-rw-r--r-- include/misc/cxl.h | 2
-rw-r--r-- include/net/bluetooth/bluetooth.h | 2
-rw-r--r-- include/net/inet_connection_sock.h | 2
-rw-r--r-- include/net/iucv/af_iucv.h | 2
-rw-r--r-- include/net/sctp/sctp.h | 2
-rw-r--r-- include/net/sock.h | 6
-rw-r--r-- include/net/tcp.h | 2
-rw-r--r-- include/net/udp.h | 2
-rw-r--r-- include/rdma/ib_addr.h | 38
-rw-r--r-- include/rdma/ib_sa.h | 10
-rw-r--r-- include/rdma/ib_verbs.h | 64
-rw-r--r-- include/rdma/opa_addr.h | 16
-rw-r--r-- include/rdma/rdma_cm.h | 19
-rw-r--r-- include/rdma/rdma_cm_ib.h | 8
-rw-r--r-- include/rdma/rdma_vt.h | 31
-rw-r--r-- include/rdma/restrack.h | 157
-rw-r--r-- include/scsi/libsas.h | 30
-rw-r--r-- include/scsi/scsi_cmnd.h | 1
-rw-r--r-- include/scsi/scsi_host.h | 2
-rw-r--r-- include/scsi/scsi_proto.h | 1
-rw-r--r-- include/scsi/scsi_transport_fc.h | 4
-rw-r--r-- include/scsi/scsi_transport_sas.h | 1
-rw-r--r-- include/scsi/srp.h | 17
-rw-r--r-- include/sound/hdaudio_ext.h | 4
-rw-r--r-- include/sound/hwdep.h | 2
-rw-r--r-- include/sound/info.h | 2
-rw-r--r-- include/sound/pcm.h | 8
-rw-r--r-- include/sound/rt5514.h | 2
-rw-r--r-- include/sound/rt5645.h | 3
-rw-r--r-- include/sound/soc-acpi-intel-match.h | 1
-rw-r--r-- include/sound/soc-acpi.h | 14
-rw-r--r-- include/sound/soc-dai.h | 5
-rw-r--r-- include/sound/soc.h | 9
-rw-r--r-- include/trace/events/btrfs.h | 1
-rw-r--r-- include/trace/events/f2fs.h | 3
-rw-r--r-- include/trace/events/rcu.h | 75
-rw-r--r-- include/trace/events/rdma.h | 129
-rw-r--r-- include/trace/events/rpcrdma.h | 890
-rw-r--r-- include/trace/events/sunrpc.h | 12
-rw-r--r-- include/trace/events/thermal.h | 10
-rw-r--r-- include/trace/events/timer.h | 37
-rw-r--r-- include/uapi/asm-generic/poll.h | 44
-rw-r--r-- include/uapi/asm-generic/siginfo.h | 109
-rw-r--r-- include/uapi/linux/arm_sdei.h | 73
-rw-r--r-- include/uapi/linux/btrfs.h | 11
-rw-r--r-- include/uapi/linux/btrfs_tree.h | 2
-rw-r--r-- include/uapi/linux/fs.h | 6
-rw-r--r-- include/uapi/linux/gfs2_ondisk.h | 62
-rw-r--r-- include/uapi/linux/lightnvm.h | 9
-rw-r--r-- include/uapi/linux/nfs.h | 1
-rw-r--r-- include/uapi/linux/nubus.h | 23
-rw-r--r-- include/uapi/linux/perf_event.h | 10
-rw-r--r-- include/uapi/linux/ptrace.h | 6
-rw-r--r-- include/uapi/linux/sched.h | 5
-rw-r--r-- include/uapi/linux/types.h | 6
-rw-r--r-- include/uapi/rdma/bnxt_re-abi.h | 9
-rw-r--r-- include/uapi/rdma/ib_user_verbs.h | 11
-rw-r--r-- include/uapi/rdma/mlx4-abi.h | 7
-rw-r--r-- include/uapi/rdma/mlx5-abi.h | 53
-rw-r--r-- include/uapi/rdma/rdma_netlink.h | 49
-rw-r--r-- include/uapi/rdma/vmw_pvrdma-abi.h | 12
-rw-r--r-- include/uapi/sound/asound.h | 9
-rw-r--r-- include/uapi/sound/snd_sst_tokens.h | 17
232 files changed, 5085 insertions, 1431 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 6db3b4668b1a..ffe364fa4040 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -145,9 +145,9 @@
#define ACPI_ADDRESS_RANGE_MAX 2
-/* Maximum number of While() loops before abort */
+/* Maximum time (default 30s) of While() loops before abort */
-#define ACPI_MAX_LOOP_COUNT 0x000FFFFF
+#define ACPI_MAX_LOOP_TIMEOUT 30
/******************************************************************************
*
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17d61b1f2511..3c46f0ef5f7a 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -130,8 +130,9 @@ struct acpi_exception_info {
#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020)
#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021)
#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022)
+#define AE_END_OF_TABLE EXCEP_ENV (0x0023)
-#define AE_CODE_ENV_MAX 0x0022
+#define AE_CODE_ENV_MAX 0x0023
/*
* Programmer exceptions
@@ -195,7 +196,7 @@ struct acpi_exception_info {
#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E)
#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
-#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
+#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
@@ -275,7 +276,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_DECIMAL_OVERFLOW",
"Overflow during ASCII decimal-to-binary conversion"),
EXCEP_TXT("AE_OCTAL_OVERFLOW",
- "Overflow during ASCII octal-to-binary conversion")
+ "Overflow during ASCII octal-to-binary conversion"),
+ EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table")
};
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
@@ -368,8 +370,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"The length of a Resource Descriptor in the AML is incorrect"),
EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
"A memory, I/O, or PCI configuration address is invalid"),
- EXCEP_TXT("AE_AML_INFINITE_LOOP",
- "An apparent infinite AML While loop, method was aborted"),
+ EXCEP_TXT("AE_AML_LOOP_TIMEOUT",
+ "An AML While loop exceeded the maximum execution time"),
EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
"A namespace node is uninitialized or unresolved"),
EXCEP_TXT("AE_AML_TARGET_TYPE",
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 79287629c888..c9608b0b80c6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e1dd1a8d42b6..c589c3e12d90 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170831
+#define ACPI_CA_VERSION 0x20171215
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -260,11 +260,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
- * Maximum number of While() loop iterations before forced method abort.
+ * Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
*/
-ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
/*
* This mechanism is used to trace a specified AML method. The method is
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7a89e6de94da..4c304bf4d591 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -69,9 +69,10 @@
#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
-#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */
+#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
@@ -1149,7 +1150,8 @@ enum acpi_nfit_type {
ACPI_NFIT_TYPE_CONTROL_REGION = 4,
ACPI_NFIT_TYPE_DATA_REGION = 5,
ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
- ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
+ ACPI_NFIT_TYPE_CAPABILITIES = 7,
+ ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */
};
/*
@@ -1162,7 +1164,7 @@ struct acpi_nfit_system_address {
struct acpi_nfit_header header;
u16 range_index;
u16 flags;
- u32 reserved; /* Reseved, must be zero */
+ u32 reserved; /* Reserved, must be zero */
u32 proximity_domain;
u8 range_guid[16];
u64 address;
@@ -1281,9 +1283,72 @@ struct acpi_nfit_flush_address {
u64 hint_address[1]; /* Variable length */
};
+/* 7: Platform Capabilities Structure */
+
+struct acpi_nfit_capabilities {
+ struct acpi_nfit_header header;
+ u8 highest_capability;
+ u8 reserved[3]; /* Reserved, must be zero */
+ u32 capabilities;
+ u32 reserved2;
+};
+
+/* Capabilities Flags */
+
+#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */
+
+/*
+ * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM
+ */
+struct nfit_device_handle {
+ u32 handle;
+};
+
+/* Device handle construction and extraction macros */
+
+#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F
+#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0
+#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00
+#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000
+#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000
+
+#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0
+#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4
+#define ACPI_NFIT_MEMORY_ID_OFFSET 8
+#define ACPI_NFIT_SOCKET_ID_OFFSET 12
+#define ACPI_NFIT_NODE_ID_OFFSET 16
+
+/* Macro to construct a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
+ ((dimm) | \
+ ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \
+ ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \
+ ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \
+ ((node) << ACPI_NFIT_NODE_ID_OFFSET))
+
+/* Macros to extract individual fields from a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
+ ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
+
+#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \
+ (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET)
+
+#define ACPI_NFIT_GET_MEMORY_ID(handle) \
+ (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET)
+
+#define ACPI_NFIT_GET_SOCKET_ID(handle) \
+ (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET)
+
+#define ACPI_NFIT_GET_NODE_ID(handle) \
+ (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)
+
/*******************************************************************************
*
- * PDTT - Processor Debug Trigger Table (ACPI 6.2)
+ * PDTT - Platform Debug Trigger Table (ACPI 6.2)
* Version 0
*
******************************************************************************/
@@ -1301,14 +1366,14 @@ struct acpi_table_pdtt {
* starting at array_offset.
*/
struct acpi_pdtt_channel {
- u16 sub_channel_id;
+ u8 subchannel_id;
+ u8 flags;
};
-/* Mask and Flags for above */
+/* Flags for above */
-#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF
-#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8)
-#define ACPI_PPTT_WAIT_COMPLETION (1<<9)
+#define ACPI_PDTT_RUNTIME_TRIGGER (1)
+#define ACPI_PDTT_WAIT_COMPLETION (1<<1)
/*******************************************************************************
*
@@ -1376,6 +1441,20 @@ struct acpi_pptt_cache {
#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */
#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */
+/* Attributes describing cache */
+#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */
+#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */
+#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */
+#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */
+
/* 2: ID Structure */
struct acpi_pptt_id {
@@ -1405,6 +1484,68 @@ struct acpi_table_sbst {
/*******************************************************************************
*
+ * SDEV - Secure Devices Table (ACPI 6.2)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_sdev {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_sdev_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+};
+
+/* Values for subtable type above */
+
+enum acpi_sdev_type {
+ ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0,
+ ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1,
+ ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* Values for flags above */
+
+#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
+
+/*
+ * SDEV subtables
+ */
+
+/* 0: Namespace Device Based Secure Device Structure */
+
+struct acpi_sdev_namespace {
+ struct acpi_sdev_header header;
+ u16 device_id_offset;
+ u16 device_id_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1: PCIe Endpoint Device Based Device Structure */
+
+struct acpi_sdev_pcie {
+ struct acpi_sdev_header header;
+ u16 segment;
+ u16 start_bus;
+ u16 path_offset;
+ u16 path_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1a: PCIe Endpoint path entry */
+
+struct acpi_sdev_pcie_path {
+ u8 device;
+ u8 function;
+};
+
+/*******************************************************************************
+ *
* SLIT - System Locality Distance Information Table
* Version 1
*
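The new NFIT device handle macros above pack the DIMM number, channel, memory-controller ID, socket and node into the single 32-bit _ADR value used for each NVDIMM object. A minimal sketch of building a handle and reading fields back out; the function name and ID values are illustrative only:

/* Sketch: construct an NVDIMM _ADR handle and decode two of its fields. */
#include <linux/acpi.h>	/* pulls in <acpi/actbl1.h> when CONFIG_ACPI=y */

static u32 example_nfit_handle(void)
{
	/* dimm 1, channel 0, memory controller 0, socket 2, node 0 (made up) */
	u32 handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(1, 0, 0, 2, 0);

	pr_debug("socket %u, dimm %u\n",
		 ACPI_NFIT_GET_SOCKET_ID(handle),
		 ACPI_NFIT_GET_DIMM_NUMBER(handle));
	return handle;
}
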
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 686b6f8c09dc..0d60d5df14f8 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -810,6 +810,7 @@ struct acpi_iort_smmu_v3 {
u8 pxm;
u8 reserved1;
u16 reserved2;
+ u32 id_mapping_index;
};
/* Values for Model field above */
@@ -1246,6 +1247,8 @@ enum acpi_spmi_interface_types {
* TCPA - Trusted Computing Platform Alliance table
* Version 2
*
+ * TCG Hardware Interface Table for TPM 1.2 Clients and Servers
+ *
* Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
* Version 1.2, Revision 8
* February 27, 2017
@@ -1310,6 +1313,8 @@ struct acpi_table_tcpa_server {
* TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
* Version 4
*
+ * TCG Hardware Interface Table for TPM 2.0 Clients and Servers
+ *
* Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
* Version 1.2, Revision 8
* February 27, 2017
@@ -1329,15 +1334,23 @@ struct acpi_table_tpm2 {
/* Values for start_method above */
#define ACPI_TPM2_NOT_ALLOWED 0
+#define ACPI_TPM2_RESERVED1 1
#define ACPI_TPM2_START_METHOD 2
+#define ACPI_TPM2_RESERVED3 3
+#define ACPI_TPM2_RESERVED4 4
+#define ACPI_TPM2_RESERVED5 5
#define ACPI_TPM2_MEMORY_MAPPED 6
#define ACPI_TPM2_COMMAND_BUFFER 7
#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
+#define ACPI_TPM2_RESERVED9 9
+#define ACPI_TPM2_RESERVED10 10
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
+#define ACPI_TPM2_RESERVED 12
-/* Trailer appears after any start_method subtables */
+/* Optional trailer appears after any start_method subtables */
struct acpi_tpm2_trailer {
+ u8 method_parameters[12];
u32 minimum_log_length; /* Minimum length for the event log area */
u64 log_address; /* Address of the event log area */
};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4f077edb9b81..31f1be74dd16 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -468,6 +468,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
#define ACPI_NSEC_PER_MSEC 1000000L
#define ACPI_NSEC_PER_SEC 1000000000L
+#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0)
+
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;
@@ -1299,6 +1301,8 @@ typedef enum {
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
#define ACPI_OSI_WIN_10 0x0D
+#define ACPI_OSI_WIN_10_RS1 0x0E
+#define ACPI_OSI_WIN_10_RS2 0x0F
/* Definitions of getopt */
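ACPI_TIME_AFTER() above is the same signed-difference trick as the kernel's time_after(); together with the new ACPI_MAX_LOOP_TIMEOUT it lets the interpreter abort While() loops on elapsed time rather than iteration count. A rough sketch of the comparison, with illustrative names rather than ACPICA's actual interpreter code:

/* Sketch: wrap-safe deadline check in the style the loop timeout uses. */
#include <acpi/acpi.h>

static acpi_status example_check_loop_deadline(u64 now_ns, u64 deadline_ns)
{
	/* Signed subtraction keeps the test correct even if the clock wraps. */
	if (ACPI_TIME_AFTER(now_ns, deadline_ns))
		return AE_AML_LOOP_TIMEOUT;
	return AE_OK;
}
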
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000000..880a292d792f
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return &dma_direct_ops;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ebe544e048cd..1ab0e520d6fc 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -277,7 +277,11 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
VMLINUX_SYMBOL(__start_init_task) = .; \
+ VMLINUX_SYMBOL(init_thread_union) = .; \
+ VMLINUX_SYMBOL(init_stack) = .; \
*(.data..init_task) \
+ *(.data..init_thread_info) \
+ . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
VMLINUX_SYMBOL(__end_init_task) = .;
/*
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 03b97629442c..1e26f790b03f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
- return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_aead_alg(aead)->encrypt(req);
}
/**
@@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index caaa470389e0..b83d66073db0 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,12 +13,13 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
+#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, void *stream);
+void chacha20_block(u32 *state, u32 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0ed31fd80242..2d1849dffb80 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -71,12 +71,11 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
- * @init: Initialize the transformation context. Intended only to initialize the
+ * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
- * Note: mandatory.
- * @update: Push a chunk of data into the driver for transformation. This
+ * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
* function must not finalize the HASH transformation by calculating the
@@ -85,20 +84,17 @@ struct ahash_request {
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point.
- * Note: mandatory.
- * @final: Retrieve result from the driver. This function finalizes the
+ * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
* point unless hardware requires it to finish the transformation
* (then the data buffered by the device driver is processed).
- * Note: mandatory.
- * @finup: Combination of @update and @final. This function is effectively a
+ * @finup: **[optional]** Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
- * Note: optional.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
@@ -210,7 +206,6 @@ struct crypto_ahash {
unsigned int keylen);
unsigned int reqsize;
- bool has_setkey;
struct crypto_tfm base;
};
@@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
-{
- return tfm->has_setkey;
-}
-
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*/
static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
- return crypto_ahash_reqtfm(req)->import(req, in);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->import(req, in);
}
/**
@@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*/
static inline int crypto_ahash_init(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->init(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->init(req);
}
/**
@@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
*/
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- return crypto_shash_alg(desc->tfm)->import(desc, in);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->import(desc, in);
}
/**
@@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
*/
static inline int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->init(desc);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->init(desc);
}
/**
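With crypto_ahash_init()/import() and crypto_shash_init()/import() now rejecting unkeyed transforms, users of keyed hashes must call setkey() before starting a request or they get -ENOKEY. A minimal sketch for a keyed shash; the algorithm name, function name and error handling are deliberately simplified:

/* Sketch: setkey() clears CRYPTO_TFM_NEED_KEY, so the digest can proceed. */
#include <crypto/hash.h>
#include <linux/err.h>

static int example_hmac(const u8 *key, unsigned int keylen,
			const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return err;
}
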
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index f38227a78eae..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index c2bae8da642c..27040a46d50a 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst);
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index ccad9b2c9bd6..0f6ddac1acfc 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -28,17 +28,6 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
* @base: Common crypto API algorithm data structure
*/
struct scomp_alg {
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 5757c0a4b321..15aeef6e30ef 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -12,14 +12,4 @@
struct crypto_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
-static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
-{
- return crypto_get_default_null_skcipher();
-}
-
-static inline void crypto_put_default_null_skcipher2(void)
-{
- crypto_put_default_null_skcipher();
-}
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index c65567d01e8e..f718a19da82f 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
};
int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h
new file mode 100644
index 000000000000..19ed48aefc86
--- /dev/null
+++ b/include/crypto/salsa20.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Salsa20 algorithm
+ */
+
+#ifndef _CRYPTO_SALSA20_H
+#define _CRYPTO_SALSA20_H
+
+#include <linux/types.h>
+
+#define SALSA20_IV_SIZE 8
+#define SALSA20_MIN_KEY_SIZE 16
+#define SALSA20_MAX_KEY_SIZE 32
+#define SALSA20_BLOCK_SIZE 64
+
+struct crypto_skcipher;
+
+struct salsa20_ctx {
+ u32 initial_state[16];
+};
+
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv);
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+#endif /* _CRYPTO_SALSA20_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index b9d9bd553b48..080f60c2e6b1 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -19,7 +19,6 @@
struct sha3_state {
u64 st[25];
- unsigned int md_len;
unsigned int rsiz;
unsigned int rsizw;
@@ -27,4 +26,9 @@ struct sha3_state {
u8 buf[SHA3_224_BLOCK_SIZE];
};
+int crypto_sha3_init(struct shash_desc *desc);
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 562001cb412b..2f327f090c3e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
-static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
-{
- return tfm->keysize;
-}
-
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->encrypt(req);
}
@@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->decrypt(req);
}
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 0e0c868451a5..5176c3797680 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index dd549ff04295..2cc10ae4bbb7 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -29,8 +29,8 @@
#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
-/* Bit 3 express GPIO suspend/resume persistence */
-#define GPIO_SLEEP_MAINTAIN_VALUE 0
-#define GPIO_SLEEP_MAY_LOSE_VALUE 8
+/* Bit 3 express GPIO suspend/resume and reset persistence */
+#define GPIO_PERSISTENT 0
+#define GPIO_TRANSITORY 8
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f05b9b6cd43f..e6d41b65d396 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -453,6 +453,7 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
@@ -586,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
+void *acpi_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -643,6 +645,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+static inline const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ return NULL;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -758,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device(
return NULL;
}
+static inline void *acpi_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
@@ -981,6 +994,11 @@ struct acpi_gpio_mapping {
const char *name;
const struct acpi_gpio_params *data;
unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+
+ unsigned int quirks;
};
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 304511267c82..2b709416de05 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
static inline
-unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..942afbd544b7
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __LINUX_ARM_SDEI_H
+#define __LINUX_ARM_SDEI_H
+
+#include <uapi/linux/arm_sdei.h>
+
+enum sdei_conduit_types {
+ CONDUIT_INVALID = 0,
+ CONDUIT_SMC,
+ CONDUIT_HVC,
+};
+
+#include <asm/sdei.h>
+
+/* Arch code should override this to set the entry point from firmware... */
+#ifndef sdei_arch_get_entry_point
+#define sdei_arch_get_entry_point(conduit) (0)
+#endif
+
+/*
+ * When an event occurs sdei_event_handler() will call a user-provided callback
+ * like this in NMI context on the CPU that received the event.
+ */
+typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
+
+/*
+ * Register your callback to claim an event. The event must be described
+ * by firmware.
+ */
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
+
+/*
+ * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
+ * it until it succeeds.
+ */
+int sdei_event_unregister(u32 event_num);
+
+int sdei_event_enable(u32 event_num);
+int sdei_event_disable(u32 event_num);
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+/* For use by arch code when CPU hotplug notifiers are not appropriate. */
+int sdei_mask_local_cpu(void);
+int sdei_unmask_local_cpu(void);
+#else
+static inline int sdei_mask_local_cpu(void) { return 0; }
+static inline int sdei_unmask_local_cpu(void) { return 0; }
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+
+
+/*
+ * This struct represents an event that has been registered. The driver
+ * maintains a list of all events, and which ones are registered. (Private
+ * events have one entry in the list, but are registered on each CPU).
+ * A pointer to this struct is passed to firmware, and back to the event
+ * handler. The event handler can then use this to invoke the registered
+ * callback, without having to walk the list.
+ *
+ * For CPU private events, this structure is per-cpu.
+ */
+struct sdei_registered_event {
+ /* For use by arch code: */
+ struct pt_regs interrupted_regs;
+
+ sdei_event_callback *callback;
+ void *callback_arg;
+ u32 event_num;
+ u8 priority;
+};
+
+/* The arch code entry point should then call this when an event arrives. */
+int notrace sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+/* arch code may use this to retrieve the extra registers. */
+int sdei_api_event_context(u32 query, u64 *result);
+
+#endif /* __LINUX_ARM_SDEI_H */
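The registration flow the comments above describe boils down to register-then-enable, with the callback running in an NMI-like context. A minimal sketch; the event number is an arbitrary firmware-described event, not something this header guarantees:

/* Sketch: claim an SDEI event and enable it, tearing down on failure. */
#include <linux/arm_sdei.h>

static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
	/* NMI-like context: no sleeping, no locks shared with normal context. */
	return 0;
}

static int example_claim_sdei_event(u32 event_num)
{
	int err;

	err = sdei_event_register(event_num, example_sdei_cb, NULL);
	if (err)
		return err;

	err = sdei_event_enable(event_num);
	if (err)
		sdei_event_unregister(event_num);
	return err;
}
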
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c7a353825450..40d150ad7e07 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -448,6 +448,8 @@ enum {
ATA_SET_MAX_LOCK = 0x02,
ATA_SET_MAX_UNLOCK = 0x03,
ATA_SET_MAX_FREEZE_LOCK = 0x04,
+ ATA_SET_MAX_PASSWD_DMA = 0x05,
+ ATA_SET_MAX_UNLOCK_DMA = 0x06,
/* feature values for DEVICE CONFIGURATION OVERLAY */
ATA_DCO_RESTORE = 0xC0,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..3e4ce54d84ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
* holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
* associated wb's list_lock.
*/
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23d29b39f71e..d0eb659fa733 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bv->bv_len = iter.bi_bvec_done;
}
+static inline unsigned bio_pages_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_vcnt;
+}
+
+static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
+}
+
+static inline struct page *bio_first_page_all(struct bio *bio)
+{
+ return bio_first_bvec_all(bio)->bv_page;
+}
+
+static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
#endif
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
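The new *_all() helpers above are only valid on a bio that owns its bvec table; they WARN when used on a clone. A small sketch of peeking at a bio the caller built itself (function name illustrative):

/* Sketch: inspect the first page and last segment of a non-cloned bio. */
#include <linux/bio.h>

static void example_peek_bio(struct bio *bio)
{
	struct page *first = bio_first_page_all(bio);
	struct bio_vec *last = bio_last_bvec_all(bio);

	pr_debug("%u pages, first page %p, last segment %u bytes\n",
		 bio_pages_all(bio), first, last->bv_len);
}
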
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 1030651f8309..cf2588d81148 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -16,6 +16,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <asm/byteorder.h>
/*
* Bitfield access macros
@@ -103,4 +104,49 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
+extern void __compiletime_warning("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
#endif
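The generated helpers (u32_encode_bits(), le32_get_bits() and friends) take the mask directly, like FIELD_PREP()/FIELD_GET(), but also come in fixed-endian flavours that accept and return __le/__be types. A short sketch against a made-up register field:

/* Sketch: pack and unpack a 4-bit field at bits 7:4 of a register. */
#include <linux/bitfield.h>
#include <linux/bitops.h>	/* GENMASK() */

#define EXAMPLE_SPEED_MASK	GENMASK(7, 4)	/* illustrative field */

static void example_bitfield_ops(void)
{
	__le32 reg = le32_encode_bits(0x5, EXAMPLE_SPEED_MASK);
	u32 speed = le32_get_bits(reg, EXAMPLE_SPEED_MASK);	/* back to 0x5 */
	u32 host_reg = 0;

	/* CPU-endian variant, updating the field in place: */
	u32p_replace_bits(&host_reg, speed, EXAMPLE_SPEED_MASK);
}
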
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e9825ff57b15..69bea82ebeb1 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
struct blkg_rwstat *from)
{
- struct blkg_rwstat v = blkg_rwstat_read(from);
+ u64 sum[BLKG_RWSTAT_NR];
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&v.aux_cnt[i]) +
- atomic64_read(&from->aux_cnt[i]),
+ sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
&to->aux_cnt[i]);
}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 95c9a5c862e2..8efcf49796a3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
+ unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx {
#endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ struct srcu_struct srcu[0];
};
struct blk_mq_tag_set {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9e7d8bd776d2..c5d3db0d83f8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -39,6 +39,34 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_AGAIN ((__force blk_status_t)12)
+/**
+ * blk_path_error - returns true if error may be path related
+ * @error: status the request was completed with
+ *
+ * Description:
+ * This classifies block error status into non-retryable errors and ones
+ * that may be successful if retried on a failover path.
+ *
+ * Return:
+ * %false - retrying failover path will not help
+ * %true - may succeed if retried
+ */
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_NEXUS:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
+
struct blk_issue_stat {
u64 stat;
};
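blk_path_error() gives multipath-style consumers one place to decide whether retrying on a different path could help. A sketch of a completion hook using it; the requeue helper is a hypothetical stub, not part of this API:

/* Sketch: retry path-related failures on another path, complete the rest. */
#include <linux/bio.h>		/* also pulls in blk_types.h */

static void example_requeue_on_other_path(struct bio *bio)
{
	/* hypothetical: resubmit @bio on an alternate path */
}

static void example_complete(struct bio *bio, blk_status_t status)
{
	if (status && blk_path_error(status)) {
		/* Path-related failure: a failover retry may still succeed. */
		example_requeue_on_other_path(bio);
		return;
	}

	bio->bi_status = status;
	bio_endio(bio);
}
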
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ce8a372d506..4f3df807cf8f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,6 +27,8 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/seqlock.h>
+#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t;
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
- struct list_head queuelist;
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
-
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
@@ -148,8 +150,6 @@ struct request {
int internal_tag;
- unsigned long atomic_flags;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
int tag;
@@ -158,6 +158,8 @@ struct request {
struct bio *bio;
struct bio *biotail;
+ struct list_head queuelist;
+
/*
* The hash is used inside the scheduler, and killed once the
* request reaches the dispatch list. The ipi_list is only used
@@ -205,19 +207,16 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
struct blk_issue_stat issue_stat;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
-#endif
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
unsigned short nr_integrity_segments;
#endif
+ unsigned short write_hint;
unsigned short ioprio;
unsigned int timeout;
@@ -226,11 +225,37 @@ struct request {
unsigned int extra_len; /* length of alignment and padding */
- unsigned short write_hint;
+ /*
+ * On blk-mq, the lower bits of ->gstate (generation number and
+ * state) carry the MQ_RQ_* state value and the upper bits the
+ * generation number which is monotonically incremented and used to
+ * distinguish the reuse instances.
+ *
+ * ->gstate_seq allows updates to ->gstate and other fields
+ * (currently ->deadline) during request start to be read
+ * atomically from the timeout path, so that it can operate on a
+ * coherent set of information.
+ */
+ seqcount_t gstate_seq;
+ u64 gstate;
+
+ /*
+ * ->aborted_gstate is used by the timeout to claim a specific
+ * recycle instance of this request. See blk_mq_timeout_work().
+ */
+ struct u64_stats_sync aborted_gstate_sync;
+ u64 aborted_gstate;
+
+ /* access through blk_rq_set_deadline, blk_rq_deadline */
+ unsigned long __deadline;
- unsigned long deadline;
struct list_head timeout_list;
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
/*
* completion callback.
*/
@@ -239,6 +264,12 @@ struct request {
/* for bidi */
struct request *next_rq;
+
+#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
+ unsigned long long start_time_ns;
+ unsigned long long io_start_time_ns; /* when passed to hardware */
+#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -564,6 +595,22 @@ struct request_queue {
struct queue_limits limits;
/*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit clear) or
+ * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched. All three fields are
+ * initialized by the low level device driver (e.g. scsi/sd.c).
+ * Stacking drivers (device mappers) may or may not initialize
+ * these fields.
+ */
+ unsigned int nr_zones;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+
+ /*
* sg stuff
*/
unsigned int sg_timeout;
@@ -807,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return q->nr_zones;
+}
+
+static inline unsigned int blk_queue_zone_no(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return sector >> ilog2(q->limits.chunk_sectors);
+}
+
+static inline bool blk_queue_zone_is_seq(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
+ return false;
+ return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
+}
+
static inline bool rq_is_sync(struct request *rq)
{
return op_is_sync(rq->cmd_flags);
@@ -1046,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+}
+
/*
* Some commands like WRITE SAME have a payload or data transfer size which
* is different from the size of the request. Any driver that supports such
@@ -1595,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
if (q)
return blk_queue_zone_sectors(q);
+ return 0;
+}
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ if (q)
+ return blk_queue_nr_zones(q);
return 0;
}
@@ -1731,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
@@ -1971,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+bool blk_req_needs_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
#else /* CONFIG_BLOCK */
struct block_device;
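The zone write lock helpers above let an I/O scheduler or dispatcher serialize writes to a sequential zone. A sketch of the intended dispatch-side usage, with the device-specific parts omitted:

/* Sketch: take the per-zone write lock only if no write is already in flight. */
#include <linux/blkdev.h>

static bool example_try_dispatch(struct request *rq)
{
	/* Another write already owns this sequential zone. */
	if (!blk_req_can_dispatch_to_zone(rq))
		return false;

	blk_req_zone_write_lock(rq);
	/*
	 * ... issue rq to the device here; the completion path calls
	 * blk_req_zone_write_unlock(rq) once the write finishes ...
	 */
	return true;
}
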
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8b1bf8d3d4a2..58a82f58e44e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -151,7 +151,6 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..fe7a22dd133b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+/* for iterating one bio from start to end */
+#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
+{ \
+ .bi_sector = 0, \
+ .bi_size = UINT_MAX, \
+ .bi_idx = 0, \
+ .bi_bvec_done = 0, \
+}
+
#endif /* __LINUX_BVEC_ITER_H */
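
A hedged sketch of the new initializer: because .bi_size starts at UINT_MAX, BVEC_ITER_ALL_INIT is suited to walking a bvec table from its first segment with manual, caller-bounded advancement via bvec_iter_bvec()/bvec_iter_advance(), not with a self-terminating loop. The helper below is illustrative and assumes lowmem pages.

#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_bvec_prefix(const struct bio_vec *bvecs, void *dst,
			     unsigned int bytes)
{
	struct bvec_iter iter = BVEC_ITER_ALL_INIT;
	u8 *out = dst;

	while (bytes) {
		struct bio_vec bv = bvec_iter_bvec(bvecs, iter);
		unsigned int len = min(bytes, bv.bv_len);

		/* Lowmem only; a real user would kmap highmem pages. */
		memcpy(out, page_address(bv.bv_page) + bv.bv_offset, len);
		out += len;
		bytes -= len;
		bvec_iter_advance(bvecs, &iter, len);
	}
}
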
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8b7fd8eeccee..9f242b876fde 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -561,7 +561,7 @@ struct cftype {
/*
* Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
+ * See Documentation/cgroup-v1/cgroups.txt for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 0fc36406f32c..8a9643857c4a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -157,6 +157,104 @@ struct compat_sigaction {
compat_sigset_t sa_mask __packed;
};
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+ int si_errno;
+ int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+#ifdef CONFIG_X86_X32_ABI
+ /* SIGCHLD (x32 version) */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+#endif
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ compat_uptr_t _addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+ union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * used when si_code=BUS_MCEERR_AO
+ */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ short _dummy_bnd;
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ short _dummy_pkey;
+ u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
+
/*
* These functions operate on 32- or 64-bit specs depending on
* COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
@@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
+int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 723e952fde0d..d14ef4e77c8a 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -275,6 +275,50 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
+#define CPER_ARM_CACHE_ERROR 0
+#define CPER_ARM_TLB_ERROR 1
+#define CPER_ARM_BUS_ERROR 2
+#define CPER_ARM_VENDOR_ERROR 3
+#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+
+#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
+#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
+#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
+#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
+#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
+#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
+#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
+#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
+#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
+#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
+#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
+#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
+
+#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
+#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_OPERATION_SHIFT 18
+#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0)
+#define CPER_ARM_ERR_LEVEL_SHIFT 22
+#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0)
+#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
+#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_CORRECTED_SHIFT 26
+#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
+#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
+#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
+#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
+#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0)
+#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
+#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0)
+
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
@@ -494,6 +538,8 @@ struct cper_sec_pcie {
/* Reset to default packing */
#pragma pack()
+extern const char * const cper_proc_error_type_strs[4];
+
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc);
#endif
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index d4292ebc5c8b..de0dafb9399d 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,9 +30,6 @@
struct cpufreq_policy;
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 *power);
-
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func);
-
-/**
- * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
- * @policy: cpufreq policy.
- */
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy);
-
-struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
-{
- return NULL;
-}
-#endif
-
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
@@ -90,34 +50,27 @@ cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func)
-{
- return NULL;
-}
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- return ERR_PTR(-ENOSYS);
+ return;
}
+#endif /* CONFIG_CPU_THERMAL */
+#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @policy: cpufreq policy.
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
+#else
static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
-
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
-{
- return;
-}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1a32e558eb11..2c787c5cad3d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -109,6 +109,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..871f9e21810c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ __ret = low_level_idle_enter(idx); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
})
+#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+
#endif /* _LINUX_CPUIDLE_H */
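
A hedged sketch of the new retention variant: a cpuidle driver's ->enter hook for a state that retains CPU context can use CPU_PM_CPU_IDLE_ENTER_RETENTION() so the cpu_pm_enter()/cpu_pm_exit() notifier calls are skipped. The firmware entry function below is a hypothetical stand-in.

#include <linux/cpuidle.h>

/* Hypothetical platform call that parks the CPU in a retention state. */
static int demo_firmware_enter_retention(int index)
{
	return 0;
}

static int demo_enter_retention(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	/* CPU context is retained, so no CPU PM notifiers are needed. */
	return CPU_PM_CPU_IDLE_ENTER_RETENTION(demo_firmware_enter_retention,
					       idx);
}
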
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index cd4f420231ba..72c92c396bb8 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,12 +5,19 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
+extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
+static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
#endif /* _LINUX_CRC_CCITT_H */
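
A short sketch of the new MSB-first variant: CRC-16/CCITT-FALSE conventionally starts from 0xffff, unlike the LSB-first crc_ccitt(). The frame layout implied here is illustrative.

#include <linux/crc-ccitt.h>

static u16 demo_frame_crc(const u8 *frame, size_t len)
{
	/* CRC-16/CCITT-FALSE: MSB-first table, initial value 0xffff. */
	return crc_ccitt_false(0xffff, frame, len);
}
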
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 78508ca4b108..7e6e84cf6383 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,8 +107,16 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
+#define CRYPTO_TFM_NEED_KEY 0x00000001
+
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
@@ -447,7 +455,7 @@ struct crypto_alg {
unsigned int cra_alignmask;
int cra_priority;
- atomic_t cra_refcnt;
+ refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
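
A hedged sketch of the new flag from the algorithm side: a hash whose ->setkey() merely overrides a built-in default (for example a CRC seed) can set CRYPTO_ALG_OPTIONAL_KEY, so the API does not insist on a setkey() call (and does not leave CRYPTO_TFM_NEED_KEY set) before the first digest. The algorithm name is hypothetical and the remaining shash ops are omitted for brevity.

#include <crypto/hash.h>
#include <linux/module.h>

static int demo_setkey(struct crypto_shash *tfm, const u8 *key,
		       unsigned int keylen)
{
	/* Would replace the default seed; sketch only. */
	return 0;
}

static struct shash_alg demo_alg = {
	.digestsize	= 4,
	.setkey		= demo_setkey,	/* optional thanks to the flag below */
	/* .init/.update/.final/.descsize omitted in this sketch */
	.base		= {
		.cra_name	= "demo-csum",
		.cra_driver_name = "demo-csum-generic",
		.cra_flags	= CRYPTO_ALG_OPTIONAL_KEY,
		.cra_blocksize	= 1,
		.cra_module	= THIS_MODULE,
	},
};
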
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a5538433c927..da83f64952e7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,6 +28,7 @@ enum dm_queue_mode {
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_MQ_REQUEST_BASED = 3,
DM_TYPE_DAX_BIO_BASED = 4,
+ DM_TYPE_NVME_BIO_BASED = 5,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -221,14 +222,6 @@ struct target_type {
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
/*
- * Some targets need to be sent the same WRITE bio severals times so
- * that they can send copies of it to different devices. This function
- * examines any supplied bio and returns the number of copies of it the
- * target requires.
- */
-typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
-
-/*
 * A target implements its own bio data integrity.
*/
#define DM_TARGET_INTEGRITY 0x00000010
@@ -291,13 +284,6 @@ struct dm_target {
*/
unsigned per_io_data_size;
- /*
- * If defined, this function is called to find out how many
- * duplicate bios should be sent to the target when writing
- * data.
- */
- dm_num_write_bios_fn num_write_bios;
-
/* target specific data */
void *private;
@@ -329,35 +315,9 @@ struct dm_target_callbacks {
int (*congested_fn) (struct dm_target_callbacks *, int);
};
-/*
- * For bio-based dm.
- * One of these is allocated for each bio.
- * This structure shouldn't be touched directly by target drivers.
- * It is here so that we can inline dm_per_bio_data and
- * dm_bio_from_per_bio_data
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- unsigned target_bio_nr;
- unsigned *len_ptr;
- struct bio clone;
-};
-
-static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
-{
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
-}
-
-static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
-{
- return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
-}
-
-static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
-{
- return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
-}
+void *dm_per_bio_data(struct bio *bio, size_t data_size);
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
int dm_table_complete(struct dm_table *t);
/*
+ * Destroy the table when finished.
+ */
+void dm_table_destroy(struct dm_table *t);
+
+/*
* Target may require that it is never sent I/O larger than len.
*/
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -585,6 +550,7 @@ do { \
#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
+#define DM_ENDIO_DELAY_REQUEUE 3
/*
* Definitions of return values from target map function.
@@ -592,7 +558,7 @@ do { \
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
-#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
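
A hedged sketch of the now out-of-line per-bio-data accessors from a bio-based target's point of view: the constructor advertises how much per-I/O state it needs and the map method retrieves it with dm_per_bio_data(). Target and structure names are hypothetical, and the actual remapping is omitted.

#include <linux/device-mapper.h>
#include <linux/jiffies.h>

struct demo_io {
	unsigned long start_jiffies;
};

static int demo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->per_io_data_size = sizeof(struct demo_io);
	return 0;
}

static int demo_map(struct dm_target *ti, struct bio *bio)
{
	struct demo_io *io = dm_per_bio_data(bio, sizeof(struct demo_io));

	io->start_jiffies = jiffies;
	/* ... remap the bio to the underlying device (omitted) ... */
	return DM_MAPIO_REMAPPED;
}
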
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d32000725da..46ac622e5c6f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -575,6 +575,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
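
A short sketch of the new macro: DEVICE_ATTR_PREALLOC declares an attribute whose sysfs buffer is allocated at open time (SYSFS_PREALLOC) rather than on first read/write, which helps when ->show() or ->store() may run while memory allocation is best avoided. The show handler is a placeholder.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t ready_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 1);
}
static DEVICE_ATTR_PREALLOC(ready, 0444, ready_show, NULL);
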
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..085db2fee2d7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -301,7 +301,7 @@ struct dma_buf {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
- unsigned long active;
+ __poll_t active;
} cb_excl, cb_shared;
};
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..bcdb1a3e4b1f
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_DIRECT_H
+#define _LINUX_DMA_DIRECT_H 1
+
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#else
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dev_addr = (dma_addr_t)paddr;
+
+ return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ phys_addr_t paddr = (phys_addr_t)dev_addr;
+
+ return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void dma_mark_clean(void *addr, size_t size);
+#else
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+int dma_direct_supported(struct device *dev, u64 mask);
+
+#endif /* _LINUX_DMA_DIRECT_H */
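
A worked example of the generic translation above, assuming PAGE_SHIFT == 12 and dev->dma_pfn_offset == 0x80000 (so 0x80000000 bytes are subtracted): phys_to_dma(dev, 0xc0000000) yields 0x40000000, and dma_to_phys() inverts it. The helper below shows the dma_capable() check a bounce-buffer decision would typically make.

#include <linux/dma-direct.h>

static bool demo_needs_bounce(struct device *dev, phys_addr_t paddr,
			      size_t size)
{
	/* Bounce when the translated bus address exceeds the DMA mask. */
	return !dma_capable(dev, phys_to_dma(dev, paddr), size);
}
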
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 81ed9b2d84dc..34fe8463d10e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,7 +136,7 @@ struct dma_map_ops {
int is_phys;
};
-extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
void *cpu_addr;
BUG_ON(!ops);
+ WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
+ /*
+ * Let the implementation decide on the zone to allocate from, and
+ * decide on the way of zeroing the memory given that the memory
+ * returned should always be zeroed.
+ */
+ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
+
if (!arch_dma_alloc_attrs(&dev, &flag))
return NULL;
if (!ops->alloc)
@@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+/*
+ * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
+ * don't use this is new code.
+ */
+#ifndef arch_dma_supported
+#define arch_dma_supported(dev, mask) (1)
+#endif
+
static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
if (!ops)
return 0;
+ if (!arch_dma_supported(dev, mask))
+ return 0;
+
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
@@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
- return *dev->dma_mask >> PAGE_SHIFT;
+ return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif
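
A minimal sketch of the caller-visible effect of the change above: coherent allocations come back zeroed and the zone and __GFP_ZERO modifiers are chosen by the implementation, so a driver only passes the basic GFP context. The ring-buffer naming is illustrative.

#include <linux/dma-mapping.h>

static void *demo_alloc_desc_ring(struct device *dev, size_t size,
				  dma_addr_t *dma)
{
	/* Returned memory is already zeroed; no __GFP_ZERO needed. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}
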
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 29fdf8029cf6..f5083aa72eae 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -475,6 +475,39 @@ typedef struct {
u64 get_all;
} apple_properties_protocol_64_t;
+typedef struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_32_t;
+
+typedef struct {
+ u64 get_capability;
+ u64 get_event_log;
+ u64 hash_log_extend_event;
+ u64 submit_command;
+ u64 get_active_pcr_banks;
+ u64 set_active_pcr_banks;
+ u64 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_64_t;
+
+typedef u32 efi_tcg2_event_log_format;
+
+typedef struct {
+ void *get_capability;
+ efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
+ efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
+ void *hash_log_extend_event;
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -625,6 +658,7 @@ void efi_native_runtime_setup(void);
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -637,6 +671,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
typedef struct {
efi_guid_t guid;
@@ -911,6 +946,7 @@ extern struct efi {
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
+ unsigned long tpm_log; /* TPM2 Event Log table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1536,6 +1572,8 @@ static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
#endif
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
+
/*
* Arch code can implement the following three template macros, avoiding
 * repetition for the void/non-void return cases of {__,}efi_call_virt():
@@ -1603,4 +1641,12 @@ struct linux_efi_random_seed {
u8 bits[];
};
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u8 version;
+ u8 log[];
+};
+
+extern int efi_tpm_eventlog_init(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 3d794b3dc532..6d9e230dffd2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_register_queue(struct request_queue *q);
-extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a18..7094718b653b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -26,18 +26,16 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct eventfd_ctx;
struct file;
#ifdef CONFIG_EVENTFD
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
* Ugly ugly ugly error layer to support modules that uses eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
- return ERR_PTR(-ENOSYS);
-}
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
- __u64 *cnt)
-{
- return -ENOSYS;
-}
-
static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
wait_queue_entry_t *wait, __u64 *cnt)
{
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 43e98d30d2df..58aecb60ea51 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -117,6 +117,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -212,6 +213,7 @@ struct f2fs_extent {
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+#define F2FS_PIN_FILE 0x40 /* file should not be gced */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -229,7 +231,13 @@ struct f2fs_inode {
__le32 i_ctime_nsec; /* change time in nano scale */
__le32 i_mtime_nsec; /* modification time in nano scale */
__le32 i_generation; /* file version (for NFS) */
- __le32 i_current_depth; /* only for directory depth */
+ union {
+ __le32 i_current_depth; /* only for directory depth */
+ __le16 i_gc_failures; /*
+ * # of gc failures on pinned file.
+ * only for regular files.
+ */
+ };
__le32 i_xattr_nid; /* nid to save xattr */
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
@@ -245,8 +253,10 @@ struct f2fs_inode {
__le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
__le32 i_projid; /* project id */
__le32 i_inode_checksum;/* inode meta checksum */
+ __le64 i_crtime; /* creation time */
+ __le32 i_crtime_nsec; /* creation time in nano scale */
__le32 i_extra_end[0]; /* for attribute size calculation */
- };
+ } __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
};
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 511fbaabf624..2a815560fda0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,7 +639,7 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
- u64 i_version;
+ atomic64_t i_version;
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
down_write_nested(&inode->i_rwsem, subclass);
}
+static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
+{
+ down_read_nested(&inode->i_rwsem, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
@@ -1359,7 +1364,7 @@ struct super_block {
const struct fscrypt_operations *s_cop;
- struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
@@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *);
extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
int open_flag);
+int vfs_mkobj(struct dentry *, umode_t,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *);
+
/*
* VFS file helper functions.
*/
@@ -1698,7 +1707,7 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -2036,21 +2045,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * inode_inc_iversion - increments i_version
- * @inode: inode that need to be updated
- *
- * Every time the inode is modified, the i_version field will be incremented.
- * The filesystem has to be mounted with i_version flag
- */
-
-static inline void inode_inc_iversion(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
-}
-
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
@@ -2699,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
extern int __check_sticky(struct inode *dir, struct inode *inode);
@@ -2992,6 +2985,7 @@ enum {
};
void dio_end_io(struct bio *bio);
+void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
@@ -3239,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
ki->ki_flags |= IOCB_DSYNC;
if (flags & RWF_SYNC)
ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+ if (flags & RWF_APPEND)
+ ki->ki_flags |= IOCB_APPEND;
return 0;
}
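
A hedged userspace sketch of what the RWF_APPEND-to-IOCB_APPEND mapping above enables: a single pwritev2() call can append to a file that was not opened with O_APPEND. This assumes a libc that already exposes RWF_APPEND; otherwise the flag can be pulled in via <linux/uio.h>.

#define _GNU_SOURCE
#include <sys/uio.h>

static ssize_t append_record(int fd, const void *buf, size_t len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	/* offset -1 means "current file offset"; RWF_APPEND forces append. */
	return pwritev2(fd, &iov, 1, -1, RWF_APPEND);
}
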
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3319df9727aa..9c3c9a319e48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -766,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* for init task */
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
-
/*
* Stack of return addresses for functions
* of a thread.
@@ -846,7 +843,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -925,10 +921,6 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION .trace_recursion = 0,
-#endif
-
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
@@ -937,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fa1a489efe4 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct fwnode_operations;
+struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
* @property_read_integer_array: Read an array of integer properties. Return
* zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index ecc2928e8046..bc738504ab4a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds genl mutex.
+ * the READ_ONCE(), because caller holds genl mutex.
*/
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5144ebe046c9..5e3531027b51 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk);
}
+extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
+static inline void add_disk_no_queue_reg(struct gendisk *disk)
+{
+ device_add_disk_no_queue_reg(NULL, disk);
+}
extern void del_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8ef7fc0ce0f0..91ed23468530 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,4 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/gpio.h>
+ *
+ * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
+ * used for GPIO drivers still referencing the global GPIO numberspace,
+ * and should not be included in new code.
+ *
+ * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
+ * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
+ */
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 7447d85dbe2f..dbd065963296 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
@@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
@@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
return -ENOSYS;
}
+static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
}
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 7258cd676df4..1ba9a331ec51 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset);
+
#ifdef CONFIG_LOCKDEP
/*
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 846be7c69a52..b2f2dc638463 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -10,8 +10,8 @@ enum gpio_lookup_flags {
GPIO_ACTIVE_LOW = (1 << 0),
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
};
/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d491027a7c22..091a81cf330f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -281,6 +281,7 @@ struct hid_item {
#define HID_DG_DEVICECONFIG 0x000d000e
#define HID_DG_DEVICESETTINGS 0x000d0023
+#define HID_DG_AZIMUTH 0x000d003f
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -342,6 +343,7 @@ struct hid_item {
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
#define HID_QUIRK_NO_IGNORE 0x40000000
@@ -671,6 +673,7 @@ struct hid_usage_id {
* to be called)
* @dyn_list: list of dynamically added device ids
* @dyn_lock: lock protecting @dyn_list
+ * @match: check if the given device is handled by this driver
* @probe: new device inserted
* @remove: device removed (NULL if not a hot-plug capable driver)
* @report_table: on which reports to call raw_event (NULL means all)
@@ -683,6 +686,8 @@ struct hid_usage_id {
* @input_mapped: invoked on input registering after mapping an usage
* @input_configured: invoked just before the device is registered
* @feature_mapping: invoked on feature registering
+ * @bus_add_driver: invoked when a HID driver is about to be added
+ * @bus_removed_driver: invoked when a HID driver has been removed
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -711,6 +716,7 @@ struct hid_driver {
struct list_head dyn_list;
spinlock_t dyn_lock;
+ bool (*match)(struct hid_device *dev, bool ignore_special_driver);
int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
void (*remove)(struct hid_device *dev);
@@ -736,6 +742,8 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
+ void (*bus_add_driver)(struct hid_driver *driver);
+ void (*bus_removed_driver)(struct hid_driver *driver);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
@@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
+extern struct bus_type hid_bus_type;
+
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id);
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv);
s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
@@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt);
/* HID quirks API */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_quirks_init(char **quirks_param);
-void usbhid_quirks_exit(void);
+unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+void hid_quirks_exit(__u16 bus);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..c7902ca7c9f4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ * soft irq context
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
- HRTIMER_MODE_ABS_PINNED = 0x02,
- HRTIMER_MODE_REL_PINNED = 0x03,
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
};
/*
@@ -87,6 +103,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -97,6 +114,7 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
+ u8 is_soft;
};
/**
@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
-# define HRTIMER_CLOCK_BASE_ALIGN 64
+# define __hrtimer_clock_base_align ____cacheline_aligned
#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
+# define __hrtimer_clock_base_align
#endif
/**
@@ -123,48 +141,57 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
- int index;
+ unsigned int index;
clockid_t clockid;
+ seqcount_t seq;
+ struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+} __hrtimer_clock_base_align;
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
-/*
+/**
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
- * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated: set if the softirq has been raised; softirq-related
+ * settings need no further update in that case.
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue; it is the total first expiry time (hard
+ * and soft hrtimer are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time at which the softirq-based queues must next be checked for expiry
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +200,28 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
- hres_active : 1,
- hang_detected : 1;
- ktime_t expires_next;
- struct hrtimer *next_timer;
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ timer->base->cpu_base->hres_active : 0;
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return timer->base->cpu_base->hres_active;
-}
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return 0;
-}
-
static inline void clock_was_set_delayed(void) { }
#endif
@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purpose only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
- int clock);
+ clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
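
A minimal sketch of the new soft modes: initializing and arming an hrtimer whose callback runs from the hrtimer softirq rather than hard interrupt context. The timer must be started with the same _SOFT variant it was initialized with; the 10 ms expiry and function names are arbitrary.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_timeout(struct hrtimer *t)
{
	/* Runs in softirq context because of HRTIMER_MODE_REL_SOFT. */
	return HRTIMER_NORESTART;
}

static void demo_arm(struct hrtimer *t)
{
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	t->function = demo_timeout;
	hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}
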
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644
index 000000000000..e7dc7a542a4e
--- /dev/null
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5e347a9805fd..9887f4f8e2a8 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -134,6 +134,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
void *private),
void *private);
/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer: The callback buffer from whom we want the channel
+ * information.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function allows configuring the buffer watermark.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+ size_t watermark);
+
+/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
*/
@@ -216,6 +227,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ * @val2: Value being written. val2 use depends on attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+ int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being read.
+ * @val2: Value being read. val2 use depends on attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns an error code on failure. Otherwise returns a description of what
+ * is in val and val2, such as IIO_VAL_INT_PLUS_MICRO, which means the value
+ * is val + val2/1e6.
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+ int *val2, enum iio_chan_info_enum attribute);
+
+/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.
* @val: Value being written.
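
A short consumer-side sketch of the new accessors: reading a channel's scale through iio_read_channel_attribute(); the return value describes how val and val2 are to be combined (IIO_VAL_INT, IIO_VAL_INT_PLUS_MICRO, ...).

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int demo_read_scale(struct iio_channel *chan, int *integer, int *micro)
{
	/* Negative value on error, otherwise an IIO_VAL_* description. */
	return iio_read_channel_attribute(chan, integer, micro,
					  IIO_CHAN_INFO_SCALE);
}
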
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644
index 000000000000..44d48bb1d39f
--- /dev/null
+++ b/include/linux/iio/hw-consumer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
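
A hedged sketch of the hardware-consumer lifecycle exposed above: allocate a consumer for a device's channels, enable it while the hardware path to the producer must be active, then disable and free. Error handling assumes ERR_PTR-style returns from the allocator.

#include <linux/err.h>
#include <linux/iio/hw-consumer.h>

static int demo_use_hw_consumer(struct device *dev)
{
	struct iio_hw_consumer *hwc;
	int ret;

	hwc = iio_hw_consumer_alloc(dev);
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	ret = iio_hw_consumer_enable(hwc);
	if (ret)
		goto out_free;

	/* ... hardware path between producer and consumer is active ... */

	iio_hw_consumer_disable(hwc);
out_free:
	iio_hw_consumer_free(hwc);
	return ret;
}
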
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 20b61347ea58..f12a61be1ede 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -20,34 +20,6 @@
* Currently assumes nano seconds.
*/
-enum iio_chan_info_enum {
- IIO_CHAN_INFO_RAW = 0,
- IIO_CHAN_INFO_PROCESSED,
- IIO_CHAN_INFO_SCALE,
- IIO_CHAN_INFO_OFFSET,
- IIO_CHAN_INFO_CALIBSCALE,
- IIO_CHAN_INFO_CALIBBIAS,
- IIO_CHAN_INFO_PEAK,
- IIO_CHAN_INFO_PEAK_SCALE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
- IIO_CHAN_INFO_AVERAGE_RAW,
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_SAMP_FREQ,
- IIO_CHAN_INFO_FREQUENCY,
- IIO_CHAN_INFO_PHASE,
- IIO_CHAN_INFO_HARDWAREGAIN,
- IIO_CHAN_INFO_HYSTERESIS,
- IIO_CHAN_INFO_INT_TIME,
- IIO_CHAN_INFO_ENABLE,
- IIO_CHAN_INFO_CALIBHEIGHT,
- IIO_CHAN_INFO_CALIBWEIGHT,
- IIO_CHAN_INFO_DEBOUNCE_COUNT,
- IIO_CHAN_INFO_DEBOUNCE_TIME,
- IIO_CHAN_INFO_CALIBEMISSIVITY,
- IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
-
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 2aa7b6384d64..6eb3d683ef62 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -34,4 +34,32 @@ enum iio_available_type {
IIO_AVAIL_RANGE,
};
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+};
+
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6a532629c983..a454b8aeb938 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -21,22 +21,11 @@
#include <asm/thread_info.h>
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk) \
- .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk) \
- .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
+extern struct group_info init_groups;
+extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -47,67 +36,16 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s) \
- .posix_timers = LIST_HEAD_INIT(s.posix_timers),
#define INIT_CPU_TIMERS(s) \
.cpu_timers = { \
LIST_HEAD_INIT(s.cpu_timers[0]), \
LIST_HEAD_INIT(s.cpu_timers[1]), \
- LIST_HEAD_INIT(s.cpu_timers[2]), \
- },
-#define INIT_CPUTIMER(s) \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
+ LIST_HEAD_INIT(s.cpu_timers[2]), \
},
#else
-#define INIT_POSIX_TIMERS(s)
#define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
#endif
-#define INIT_SIGNALS(sig) { \
- .nr_threads = 1, \
- .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
- .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
- .shared_pending = { \
- .list = LIST_HEAD_INIT(sig.shared_pending.list), \
- .signal = {{0}}}, \
- INIT_POSIX_TIMERS(sig) \
- INIT_CPU_TIMERS(sig) \
- .rlim = INIT_RLIMITS, \
- INIT_CPUTIMER(sig) \
- INIT_PREV_CPUTIME(sig) \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
- .action = { { { .sa_handler = SIG_DFL, } }, }, \
- .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
- .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID { \
- .count = ATOMIC_INIT(1), \
- .tasks = { \
- { .first = NULL }, \
- { .first = NULL }, \
- { .first = NULL }, \
- }, \
- .level = 0, \
- .numbers = { { \
- .nr = 0, \
- .ns = &init_pid_ns, \
- }, } \
-}
-
#define INIT_PID_LINK(type) \
{ \
.node = { \
@@ -117,192 +55,16 @@ extern struct group_info init_groups;
.pid = &init_struct_pid, \
}
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
- .loginuid = INVALID_UID, \
- .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk) \
- .rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special.s = 0, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk) \
- .rcu_tasks_holdout = false, \
- .rcu_tasks_holdout_list = \
- LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
- .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk) \
- .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
- .perf_event_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
- .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk) \
- .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
- .vtime.starttime = 0, \
- .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
#define INIT_TASK_COMM "swapper"
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT_CACHED, \
- .pi_top_task = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
- .numa_group = NULL, \
- .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk) \
- .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk) \
- .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk) \
- .thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
-#else
-#define INIT_TASK_SECURITY
-#endif
-
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- INIT_TASK_TI(tsk) \
- .state = 0, \
- .stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
- .flags = PF_KTHREAD, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
- .normal_prio = MAX_PRIO-20, \
- .policy = SCHED_NORMAL, \
- .cpus_allowed = CPU_MASK_ALL, \
- .nr_cpus_allowed= NR_CPUS, \
- .mm = NULL, \
- .active_mm = &init_mm, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
- }, \
- .rt = { \
- .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = RR_TIMESLICE, \
- }, \
- .tasks = LIST_HEAD_INIT(tsk.tasks), \
- INIT_PUSHABLE_TASKS(tsk) \
- INIT_CGROUP_SCHED(tsk) \
- .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
- .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
- .real_parent = &tsk, \
- .parent = &tsk, \
- .children = LIST_HEAD_INIT(tsk.children), \
- .sibling = LIST_HEAD_INIT(tsk.sibling), \
- .group_leader = &tsk, \
- RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
- RCU_POINTER_INITIALIZER(cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
- .sighand = &init_sighand, \
- .nsproxy = &init_nsproxy, \
- .pending = { \
- .list = LIST_HEAD_INIT(tsk.pending.list), \
- .signal = {{0}}}, \
- .blocked = {{0}}, \
- .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
- INIT_CPU_TIMERS(tsk) \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
- }, \
- .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
- INIT_IDS \
- INIT_PERF_EVENTS(tsk) \
- INIT_TRACE_IRQFLAGS \
- INIT_LOCKDEP \
- INIT_FTRACE_GRAPH \
- INIT_TRACE_RECURSION \
- INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_TASK_RCU_TASKS(tsk) \
- INIT_CPUSET_SEQ(tsk) \
- INIT_RT_MUTEXES(tsk) \
- INIT_PREV_CPUTIME(tsk) \
- INIT_VTIME(tsk) \
- INIT_NUMA_BALANCING(tsk) \
- INIT_KASAN(tsk) \
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
-}
-
-
/* Attach to the init_task data structure for proper alignment */
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
+#else
+#define __init_task_data /**/
+#endif
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#endif
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..858d3f4a2241 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -14,6 +14,7 @@
enum integrity_status {
INTEGRITY_PASS = 0,
+ INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e81035b678f..b11fcdfd0770 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -13,10 +13,13 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING 1UL
-#define IRQ_WORK_BUSY 2UL
-#define IRQ_WORK_FLAGS 3UL
-#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_PENDING BIT(0)
+#define IRQ_WORK_BUSY BIT(1)
+
+/* Doesn't want IPI, wait for tick: */
+#define IRQ_WORK_LAZY BIT(2)
+
+#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
unsigned long flags;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 1b3996ff3f16..9700f00bbc04 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -40,7 +40,6 @@ do { \
do { \
current->softirq_context--; \
} while (0)
-# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
@@ -54,7 +53,6 @@ do { \
# define trace_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..3d2fd06495ec
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IVERSION_H
+#define _LINUX_IVERSION_H
+
+#include <linux/fs.h>
+
+/*
+ * The inode->i_version field:
+ * ---------------------------
+ * The change attribute (i_version) is mandated by NFSv4 and is mostly for
+ * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
+ * appear different to observers if there was a change to the inode's data or
+ * metadata since it was last queried.
+ *
+ * Observers see the i_version as a 64-bit number that never decreases. If it
+ * remains the same since it was last checked, then nothing has changed in the
+ * inode. If it's different then something has changed. Observers cannot infer
+ * anything about the nature or magnitude of the changes from the value, only
+ * that the inode has changed in some fashion.
+ *
+ * Not all filesystems properly implement the i_version counter. Subsystems that
+ * want to use the i_version field on an inode should first check whether the
+ * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
+ *
+ * Those that set SB_I_VERSION will automatically have their i_version counter
+ * incremented on writes to normal files. If the SB_I_VERSION is not set, then
+ * the VFS will not touch it on writes, and the filesystem can use it how it
+ * wishes. Note that the filesystem is always responsible for updating the
+ * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
+ * We consider these sorts of filesystems to have a kernel-managed i_version.
+ *
+ * It may be impractical for filesystems to keep i_version updates atomic with
+ * respect to the changes that cause them. They should, however, guarantee
+ * that i_version updates are never visible before the changes that caused
+ * them. Also, i_version updates should never be delayed longer than it takes
+ * the original change to reach disk.
+ *
+ * This implementation uses the low bit in the i_version field as a flag to
+ * track when the value has been queried. If it has not been queried since it
+ * was last incremented, we can skip the increment in most cases.
+ *
+ * In the event that we're updating the ctime, we will usually go ahead and
+ * bump the i_version anyway. Since that has to go to stable storage in some
+ * fashion, we might as well increment it as well.
+ *
+ * With this implementation, the value should always appear to observers to
+ * increase over time if the file has changed. It's recommended to use
+ * inode_cmp_iversion() helper to compare values.
+ *
+ * Note that some filesystems (e.g. NFS and AFS) just use the field to store
+ * a server-provided value (for the most part). For that reason, those
+ * filesystems do not set SB_I_VERSION. These filesystems are considered to
+ * have a self-managed i_version.
+ *
+ * Persistently storing the i_version
+ * ----------------------------------
+ * Queries of the i_version field are not gated on them hitting the backing
+ * store. It's always possible that the host could crash after allowing
+ * a query of the value but before it has made it to disk.
+ *
+ * To mitigate this problem, filesystems should always use
+ * inode_set_iversion_queried when loading an existing inode from disk. This
+ * ensures that the next attempted inode increment will result in the value
+ * changing.
+ *
+ * Storing the value to disk therefore does not count as a query, so those
+ * filesystems should use inode_peek_iversion to grab the value to be stored.
+ * There is no need to flag the value as having been queried in that case.
+ */
+
+/*
+ * We borrow the lowest bit in the i_version to use as a flag to tell whether
+ * it has been queried since we last incremented it. If it has, then we must
+ * increment it on the next change. After that, we can clear the flag and
+ * avoid incrementing it again until it has again been queried.
+ */
+#define I_VERSION_QUERIED_SHIFT (1)
+#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
+#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
+
+/**
+ * inode_set_iversion_raw - set i_version to the specified raw value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for use by
+ * filesystems that self-manage the i_version.
+ *
+ * For example, the NFS client stores its NFSv4 change attribute in this way,
+ * and the AFS client stores the data_version from the server here.
+ */
+static inline void
+inode_set_iversion_raw(struct inode *inode, u64 val)
+{
+ atomic64_set(&inode->i_version, val);
+}
+
+/**
+ * inode_peek_iversion_raw - grab a "raw" iversion value
+ * @inode: inode from which i_version should be read
+ *
+ * Grab a "raw" inode->i_version value and return it. The i_version is not
+ * flagged or converted in any way. This is mostly used to access a self-managed
+ * i_version.
+ *
+ * With those filesystems, we want to treat the i_version as an entirely
+ * opaque value.
+ */
+static inline u64
+inode_peek_iversion_raw(const struct inode *inode)
+{
+ return atomic64_read(&inode->i_version);
+}
+
+/**
+ * inode_set_iversion - set i_version to a particular value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for filesystems with
+ * a kernel-managed i_version, for initializing a newly-created inode from
+ * scratch.
+ *
+ * In this case, we do not set the QUERIED flag since we know that this value
+ * has never been queried.
+ */
+static inline void
+inode_set_iversion(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
+}
+
+/**
+ * inode_set_iversion_queried - set i_version to a particular value as queried
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val, and flag it for increment on the next
+ * change.
+ *
+ * Filesystems that persistently store the i_version on disk should use this
+ * when loading an existing inode from disk.
+ *
+ * When loading in an i_version value from a backing store, we can't be certain
+ * that it wasn't previously viewed before being stored. Thus, we must assume
+ * that it was, to ensure that we don't end up handing out the same value for
+ * different versions of the same inode.
+ */
+static inline void
+inode_set_iversion_queried(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
+ I_VERSION_QUERIED);
+}
+
+/**
+ * inode_maybe_inc_iversion - increments i_version
+ * @inode: inode with the i_version that should be updated
+ * @force: increment the counter even if it's not necessary?
+ *
+ * Every time the inode is modified, the i_version field must be seen to have
+ * changed by any observer.
+ *
+ * If "force" is set or the QUERIED flag is set, then ensure that we increment
+ * the value, and clear the queried flag.
+ *
+ * In the common case where neither is set, then we can return "false" without
+ * updating i_version.
+ *
+ * If this function returns false, and no other metadata has changed, then we
+ * can avoid logging the metadata.
+ */
+static inline bool
+inode_maybe_inc_iversion(struct inode *inode, bool force)
+{
+ u64 cur, old, new;
+
+ /*
+ * The i_version field is not strictly ordered with any other inode
+ * information, but the legacy inode_inc_iversion code used a spinlock
+ * to serialize increments.
+ *
+ * Here, we add full memory barriers to ensure that any de-facto
+ * ordering with other info is preserved.
+ *
+ * This barrier pairs with the barrier in inode_query_iversion()
+ */
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is clear then we needn't do anything */
+ if (!force && !(cur & I_VERSION_QUERIED))
+ return false;
+
+ /* Since lowest bit is flag, add 2 to avoid it */
+ new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return true;
+}
+
+
+/**
+ * inode_inc_iversion - forcibly increment i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the i_version field. This always results in a change to
+ * the observable value.
+ */
+static inline void
+inode_inc_iversion(struct inode *inode)
+{
+ inode_maybe_inc_iversion(inode, true);
+}
+
+/**
+ * inode_iversion_need_inc - is the i_version in need of being incremented?
+ * @inode: inode to check
+ *
+ * Returns whether the inode->i_version counter needs incrementing on the next
+ * change. Just fetch the value and check the QUERIED flag.
+ */
+static inline bool
+inode_iversion_need_inc(struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
+}
+
+/**
+ * inode_inc_iversion_raw - forcibly increment raw i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the raw i_version field. This always results in a change
+ * to the raw value.
+ *
+ * NFS will use the i_version field to store the value from the server. It
+ * mostly treats it as opaque, but in the case where it holds a write
+ * delegation, it must increment the value itself. This function does that.
+ */
+static inline void
+inode_inc_iversion_raw(struct inode *inode)
+{
+ atomic64_inc(&inode->i_version);
+}
+
+/**
+ * inode_peek_iversion - read i_version without flagging it to be incremented
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter for an inode without registering it as a
+ * query.
+ *
+ * This is typically used by local filesystems that need to store an i_version
+ * on disk. In that situation, it's not necessary to flag it as having been
+ * viewed, as the result won't be used to gauge changes from that point.
+ */
+static inline u64
+inode_peek_iversion(const struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_query_iversion - read i_version for later use
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter. This should be used by callers that wish
+ * to store the returned i_version for later comparison. This will guarantee
+ * that a later query of the i_version will result in a different value if
+ * anything has changed.
+ *
+ * In this implementation, we fetch the current value, set the QUERIED flag and
+ * then try to swap it into place with a cmpxchg, if it wasn't already set. If
+ * that fails, we try again with the newly fetched value from the cmpxchg.
+ */
+static inline u64
+inode_query_iversion(struct inode *inode)
+{
+ u64 cur, old, new;
+
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is already set, then no need to swap */
+ if (cur & I_VERSION_QUERIED) {
+ /*
+ * This barrier (and the implicit barrier in the
+ * cmpxchg below) pairs with the barrier in
+ * inode_maybe_inc_iversion().
+ */
+ smp_mb();
+ break;
+ }
+
+ new = cur | I_VERSION_QUERIED;
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return cur >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_cmp_iversion_raw - check whether the raw i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare the current raw i_version counter with a previous one. Returns false
+ * if they are the same or true if they are different.
+ */
+static inline bool
+inode_cmp_iversion_raw(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion_raw(inode) != old;
+}
+
+/**
+ * inode_cmp_iversion - check whether the i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare an i_version counter with a previous one. Returns false if they are
+ * the same, and true if they are different.
+ *
+ * Note that we don't need to set the QUERIED flag in this case, as the value
+ * in the inode is not being recorded for later use.
+ */
+static inline bool
+inode_cmp_iversion(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion(inode) != old;
+}
+#endif
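
The calling pattern described in the header comment can be summarised with a short sketch. This is illustrative only: the fs_example_*() and observer_example_*() wrappers are hypothetical, but the helpers they call are the ones defined above.

#include <linux/fs.h>
#include <linux/iversion.h>

/* Filesystem side: called whenever data or metadata changes. */
static void fs_example_file_changed(struct inode *inode, bool updating_ctime)
{
	/*
	 * Force the increment only when the ctime is going to disk anyway;
	 * otherwise it is skipped unless someone has queried i_version.
	 */
	if (inode_maybe_inc_iversion(inode, updating_ctime))
		mark_inode_dirty(inode);	/* new value must reach disk */
}

/* Observer side (e.g. an NFSD- or IMA-like user): detect changes. */
static bool observer_example_changed(struct inode *inode, u64 *cookie)
{
	if (!inode_cmp_iversion(inode, *cookie))
		return false;	/* nothing changed since the last sample */

	/* Record the new value; this flags it for increment on the next change. */
	*cookie = inode_query_iversion(inode);
	return true;
}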
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c7b368c734af..e0340ca08d98 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
atomic_dec(&key->enabled);
}
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.
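
A hedged sketch of why the _cpuslocked variants exist: a caller that already holds the CPU hotplug read lock can flip a key without taking cpus_read_lock() recursively. The example_key and the surrounding functions are hypothetical.

#include <linux/cpu.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);	/* hypothetical key */

static void example_enable_feature(void)
{
	cpus_read_lock();
	/* ...work that must exclude CPU hotplug... */
	static_branch_inc_cpuslocked(&example_key);
	cpus_read_unlock();
}

static bool example_feature_enabled(void)
{
	/* Compiles down to a patched branch; near zero cost when disabled. */
	return static_branch_unlikely(&example_key);
}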
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2d1d9de06728..7f4b60abdf27 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,10 +50,7 @@ struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
struct nvm_dev_ops {
nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;
@@ -112,8 +108,6 @@ enum {
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
- NVM_OP_HBREAD = 0x02,
- NVM_OP_HBWRITE = 0x81,
NVM_OP_PWRITE = 0x91,
NVM_OP_PREAD = 0x92,
NVM_OP_ERASE = 0x90,
@@ -165,12 +159,16 @@ struct nvm_id_group {
u8 fmtype;
u8 num_ch;
u8 num_lun;
- u8 num_pln;
- u16 num_blk;
- u16 num_pg;
- u16 fpg_sz;
+ u16 num_chk;
+ u16 clba;
u16 csecs;
u16 sos;
+
+ u16 ws_min;
+ u16 ws_opt;
+ u16 ws_seq;
+ u16 ws_per_chk;
+
u32 trdt;
u32 trdm;
u32 tprt;
@@ -181,7 +179,10 @@ struct nvm_id_group {
u32 mccap;
u16 cpar;
- struct nvm_id_lp_tbl lptbl;
+ /* 1.2 compatibility */
+ u8 num_pln;
+ u16 num_pg;
+ u16 fpg_sz;
};
struct nvm_addr_format {
@@ -217,6 +218,10 @@ struct nvm_target {
#define ADDR_EMPTY (~0ULL)
+#define NVM_TARGET_DEFAULT_OP (101)
+#define NVM_TARGET_MIN_OP (3)
+#define NVM_TARGET_MAX_OP (80)
+
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
@@ -239,7 +244,6 @@ struct nvm_rq {
void *meta_list;
dma_addr_t dma_meta_list;
- struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
@@ -268,31 +272,38 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
+
/* Device generic information */
struct nvm_geo {
+ /* generic geometry */
int nr_chnls;
- int nr_luns;
- int luns_per_chnl; /* -1 if channels are not symmetric */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int pgs_per_blk;
- int blks_per_lun;
- int fpg_size;
- int pfpg_size; /* size of buffer if all pages are to be read */
+ int all_luns; /* across channels */
+ int nr_luns; /* per channel */
+ int nr_chks; /* per lun */
+
int sec_size;
int oob_size;
int mccap;
- struct nvm_addr_format ppaf;
- /* Calculated/Cached values. These do not reflect the actual usable
- * blocks at run-time.
- */
+ int sec_per_chk;
+ int sec_per_lun;
+
+ int ws_min;
+ int ws_opt;
+ int ws_seq;
+ int ws_per_chk;
+
int max_rq_size;
- int plane_mode; /* drive device in single, double or quad mode */
+ int op;
+
+ struct nvm_addr_format ppaf;
+
+ /* Legacy 1.2 specific geometry */
+ int plane_mode; /* drive device in single, double or quad mode */
+ int nr_planes;
+ int sec_per_pg; /* only sectors for a single page */
int sec_per_pl; /* all sectors across planes */
- int sec_per_blk;
- int sec_per_lun;
};
/* sub-device structure */
@@ -320,10 +331,6 @@ struct nvm_dev {
/* Device information */
struct nvm_geo geo;
- /* lower page table */
- int lps_per_blk;
- int *lptbl;
-
unsigned long total_secs;
unsigned long *lun_map;
@@ -346,36 +353,6 @@ struct nvm_dev {
struct list_head targets;
};
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
- u64 pba)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = pba;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, geo->pgs_per_blk);
- div_u64_rem(ppa, geo->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, geo->blks_per_lun);
- div_u64_rem(ppa, geo->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, geo->luns_per_chnl);
- l.g.ch = ppa;
-
- return l;
-}
-
static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
struct ppa_addr r)
{
@@ -418,25 +395,6 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
return l;
}
-static inline int ppa_empty(struct ppa_addr ppa_addr)
-{
- return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
-{
- ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
-{
- if (ppa_empty(ppa1) || ppa_empty(ppa2))
- return 0;
-
- return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
- (ppa1.g.blk == ppa2.g.blk));
-}
-
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -481,17 +439,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
- void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
#else /* CONFIG_NVM */
struct nvm_dev_ops;
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fc5c1be3f6f4..4754f01c1abb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -40,7 +40,6 @@
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
- * @immediate: patch the func immediately, bypassing safety mechanisms
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
* @stack_node: list node for klp_ops func_stack list
@@ -76,7 +75,6 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
- bool immediate;
/* internal */
unsigned long old_addr;
@@ -137,7 +135,6 @@ struct klp_object {
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
- * @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
* @enabled: the patch is enabled (but operation may be incomplete)
@@ -147,7 +144,6 @@ struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
- bool immediate;
/* internal */
struct list_head list;
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index d7d313fb9cd4..4fd95dbeb52f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -17,6 +17,7 @@
#include <net/ipv6.h>
#include <linux/fs.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/utsname.h>
#include <linux/lockd/bind.h>
#include <linux/lockd/xdr.h>
@@ -58,7 +59,7 @@ struct nlm_host {
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
u32 h_pidcount; /* Pseudopids */
- atomic_t h_count; /* reference count */
+ refcount_t h_count; /* reference count */
struct mutex h_mutex; /* mutex for pmap binding */
unsigned long h_nextrebind; /* next portmap call */
unsigned long h_expires; /* eligible for GC */
@@ -83,7 +84,7 @@ struct nlm_host {
struct nsm_handle {
struct list_head sm_link;
- atomic_t sm_count;
+ refcount_t sm_count;
char *sm_mon_name;
char *sm_name;
struct sockaddr_storage sm_addr;
@@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
*/
struct nlm_lockowner {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct nlm_host *host;
fl_owner_t owner;
@@ -136,7 +137,7 @@ struct nlm_wait;
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
- atomic_t a_count;
+ refcount_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3251d9c0d313..6fc77d4dbdcd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
@@ -367,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0,
-
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) do { \
@@ -426,7 +424,6 @@ static inline void lockdep_on(void)
* #ifdef the call himself.
*/
-# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 78dc85365c4f..080798f17ece 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -645,11 +645,6 @@ struct axp20x_dev {
const struct regmap_irq_chip *regmap_irq_chip;
};
-struct axp288_extcon_pdata {
- /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
- struct gpio_desc *gpio_mux_cntl;
-};
-
/* generic helper function for reading 9-16 bit wide regs */
static inline int axp20x_read_variable_width(struct regmap *regmap,
unsigned int reg, unsigned int width)
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 4e887ba22635..c61535979b8f 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
+/* debugfs stuff */
+int cros_ec_debugfs_init(struct cros_ec_dev *ec);
+void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
+
/* ACPI GPE handler */
#ifdef CONFIG_ACPI
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 2b16e95b9bb8..a83f6498b95e 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2904,16 +2904,33 @@ enum usb_pd_control_mux {
USB_PD_CTRL_MUX_AUTO = 5,
};
+enum usb_pd_control_swap {
+ USB_PD_CTRL_SWAP_NONE = 0,
+ USB_PD_CTRL_SWAP_DATA = 1,
+ USB_PD_CTRL_SWAP_POWER = 2,
+ USB_PD_CTRL_SWAP_VCONN = 3,
+ USB_PD_CTRL_SWAP_COUNT
+};
+
struct ec_params_usb_pd_control {
uint8_t port;
uint8_t role;
uint8_t mux;
+ uint8_t swap;
} __packed;
#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
+#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */
+#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */
+#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */
+#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */
+#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */
+#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */
+#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powered */
+
struct ec_response_usb_pd_control_v1 {
uint8_t enabled;
uint8_t role;
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 3c8568aa82a5..75e5c8ff85fc 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..796fb9794c9e
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+ RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
+ RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
+ RAVE_SP_CMD_BOOT_SOURCE = 0x26,
+ RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
+ RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
+
+ RAVE_SP_CMD_STATUS = 0xA0,
+ RAVE_SP_CMD_SW_WDT = 0xA1,
+ RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RESET = 0xA7,
+ RAVE_SP_CMD_RESET_REASON = 0xA8,
+
+ RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
+ RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
+ RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
+ RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
+
+ RAVE_SP_EVNT_BASE = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+ return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+ return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+ return action >> 8;
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
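
A sketch of how the pack/unpack helpers round-trip an event through a notifier; the callback below is hypothetical and the printed values are purely illustrative.

#include <linux/mfd/rave-sp.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/types.h>

/*
 * Hypothetical notifier callback, as would be registered via
 * devm_rave_sp_register_event_notifier().
 */
static int example_rave_sp_event(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	u8 event = rave_sp_action_unpack_event(action);
	u8 value = rave_sp_action_unpack_value(action);

	/* rave_sp_action_pack(event, value) would rebuild 'action'. */
	pr_debug("rave-sp event %#x, value %#x\n", event, value);

	return NOTIFY_OK;
}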
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 77c7cf40d9b4..605f62264825 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_LPTIMER_H_
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..2aadab6f34a1 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,9 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e1cfe9194129..396a103c8bc6 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -25,26 +25,6 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 409ffb14298a..e5258ee4e38b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -79,6 +79,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -244,6 +249,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -284,6 +291,7 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
enum mlx5_event {
@@ -319,6 +327,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -626,6 +641,7 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
} __packed;
struct mlx5_eqe {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a0610427e168..fb7e8b205eb9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
+enum mlx5_dct_atomic_mode {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -231,6 +238,9 @@ struct mlx5_bfreg_info {
u32 ver;
bool lib_uar_4k;
u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
};
struct mlx5_cmd_first {
@@ -430,6 +440,7 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
+ MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -788,6 +799,7 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+ struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -826,7 +838,7 @@ struct mlx5_core_dev {
struct mlx5e_resources mlx5e_res;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
@@ -835,6 +847,8 @@ struct mlx5_core_dev {
struct cpu_rmap *rmap;
#endif
struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+ struct page *clock_info_page;
};
struct mlx5_db {
@@ -1103,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -1225,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 199bfcd2f2ce..f4e417686f62 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -1078,7 +1078,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_600[0x200];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
};
enum mlx5_flow_destination_type {
@@ -2646,7 +2651,12 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 reserved_at_40[0xc];
+
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xd0];
u8 mtu[0x10];
@@ -3289,7 +3299,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3909,7 +3920,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -5341,7 +5353,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x14];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_e[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5562,6 +5576,7 @@ struct mlx5_ifc_init_hca_in_bits {
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 62af7512dabb..4778d41085d4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -473,6 +473,11 @@ struct mlx5_core_qp {
int pid;
};
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
+};
+
struct mlx5_qp_path {
u8 fl_free_ar;
u8 rsvd3;
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *qp,
+ u32 *in, int inlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec);
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..64e193e87394 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ea818ff739cd..7fc92384977e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2570,8 +2570,8 @@ enum mf_flags {
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
};
-extern int memory_failure(unsigned long pfn, int trapno, int flags);
-extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int flags);
+extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page) put_page(page)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e7743eca1021..85146235231e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -324,6 +324,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
@@ -380,6 +381,7 @@ struct mmc_host {
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
+ unsigned int use_blk_mq:1; /* use blk-mq */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -422,9 +424,6 @@ struct mmc_host {
struct dentry *debugfs_root;
- struct mmc_async_req *areq; /* active async req */
- struct mmc_context_info context_info; /* async synchronization info */
-
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 82f0d289f110..91f1ba0663c8 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
+bool mmc_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 9642d3116718..70245f1a3590 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -807,6 +807,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULE_SIG
static inline bool module_sig_ok(struct module *module)
{
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3aa56e3104bb..b5b43f94f311 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
#define INVALIDATE_CACHED_RANGE(map, from, size) \
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & ~val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] | val2.x[i];
-
- return r;
-}
-
-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if ((val1.x[i] & val2.x[i]) != val3.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
-
- return 0;
-}
+#define map_word_equal(map, val1, val2) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) \
+ if ((val1).x[i] != (val2).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ ret; \
+})
+
+#define map_word_and(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & (val2).x[i]; \
+ r; \
+})
+
+#define map_word_clr(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & ~(val2).x[i]; \
+ r; \
+})
+
+#define map_word_or(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] | (val2).x[i]; \
+ r; \
+})
+
+#define map_word_andequal(map, val1, val2, val3) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+#define map_word_bitsset(map, val1, val2) \
+({ \
+ int i, ret = 0; \
+ for (i = 0; i < map_words(map); i++) { \
+ if ((val1).x[i] & (val2).x[i]) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
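
The converted macros keep the semantics of the old inline helpers. The sketch below is a made-up driver fragment, not from this patch, showing a typical status-polling use of map_read() together with map_word_andequal().

#include <linux/mtd/map.h>

/* Hypothetical check: are all bits of @ready_mask set in the status word? */
static int example_status_ready(struct map_info *map, unsigned long ofs,
				map_word ready_mask)
{
	map_word status = map_read(map, ofs);

	/* True only if (status & ready_mask) == ready_mask for every word. */
	return map_word_andequal(map, status, ready_mask, ready_mask);
}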
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd55bf14ad51..205ededccc60 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -489,6 +489,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ * boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+ struct erase_info *req)
+{
+ u32 mod;
+
+ if (WARN_ON(!mtd->erasesize))
+ return;
+
+ mod = mtd_mod_by_eb(req->addr, mtd);
+ if (mod) {
+ req->addr -= mod;
+ req->len += mod;
+ }
+
+ mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+ if (mod)
+ req->len += mtd->erasesize - mod;
+}
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)
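
A short worked example of the mtd_align_erase_req() helper above, with assumed numbers: on an MTD device with a 128KiB (0x20000) eraseblock, a misaligned request is widened to cover whole eraseblocks.

#include <linux/mtd/mtd.h>

/* Hypothetical: mtd->erasesize is assumed to be 0x20000 here. */
static void example_align(struct mtd_info *mtd)
{
	struct erase_info req = {
		.addr = 0x21000,	/* starts 0x1000 into an eraseblock */
		.len = 0x2000,
	};

	mtd_align_erase_req(mtd, &req);
	/* Now req.addr == 0x20000 and req.len == 0x20000 (one full block). */
}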
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 749bb08c4772..56c5570aadbe 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -133,12 +133,6 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
-/*
- * If your controller already sends the required NAND commands when
- * reading or writing a page, then the framework is not supposed to
- * send READ0 and SEQIN/PAGEPROG respectively.
- */
-#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -191,11 +185,6 @@ enum nand_ecc_algo {
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
- */
-#define NAND_OWN_BUFFERS 0x00020000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
* @priv: pointer to private ECC control data
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
* be provided if an hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl {
int postpad;
unsigned int options;
void *priv;
+ u8 *calc_buf;
+ u8 *code_buf;
void (*hwctl)(struct mtd_info *mtd, int mode);
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code);
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl {
int page);
};
-static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
-{
- return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
-}
-
-/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
- * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
- * @databuf: buffer pointer for data, size is (page size + oobsize).
- *
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
- */
-struct nand_buffers {
- uint8_t *ecccalc;
- uint8_t *ecccode;
- uint8_t *databuf;
-};
-
/**
* struct nand_sdr_timings - SDR NAND chip timings
*
@@ -762,6 +735,350 @@ struct nand_manufacturer_ops {
};
/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+ u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @in: buffer to fill when reading from the NAND chip
+ * @out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so an "in" is a read from the NAND
+ * chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @cmd/@addr/@data/@waitrdy: extra data associated with the instruction.
+ * You'll have to use the appropriate element
+ * depending on @type
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timing
+ * control logic, and in this case, the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase), all of which are u64 values that cannot be divided
+ * by the usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ sizeof(dividend) == sizeof(u32) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
+
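To make the macros above concrete, here is a hedged sketch (not taken from the patch) of a Read ID (0x90) sequence expressed as an instruction array; the zero delays assume a controller with internal timing logic, and the example_* names are illustrative:

static u8 example_id[8];
static const u8 example_readid_addr;
static const struct nand_op_instr example_readid_instrs[] = {
	NAND_OP_CMD(0x90, 0),
	NAND_OP_ADDR(1, &example_readid_addr, 0),
	NAND_OP_8BIT_DATA_IN(sizeof(example_id), example_id, 0),
};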
+/**
+ * struct nand_subop - a sub operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @addr/@data: address or data constraint (number of cycles or data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
+ union {
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operations with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_instrs) \
+ { \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
+
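For illustration only, a controller driver might wire the parser into its ->exec_op() hook roughly like this; the pattern shown (a lone WAITRDY) is deliberately trivial and the example_* names are hypothetical:

static int example_exec_waitrdy(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	/* Poll the controller's ready/busy status here. */
	return 0;
}

static const struct nand_op_parser example_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(example_exec_waitrdy,
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)));

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_parser, op, check_only);
}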
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops {
* commands to the chip.
* @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
* ready.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces ->cmdfunc(),
+ * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
+ * ->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
* @buf_align: minimum buffer alignment required by a platform
* @hwcontrol: platform-specific hardware control structure
* @erase: [REPLACEABLE] erase function
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops {
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
* @pagebuf: [INTERN] holds the pagenumber which is currently in
* data_buf.
* @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
@@ -886,6 +1207,9 @@ struct nand_chip {
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -896,7 +1220,6 @@ struct nand_chip {
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
-
int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -908,6 +1231,7 @@ struct nand_chip {
int numchips;
uint64_t chipsize;
int pagemask;
+ u8 *data_buf;
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
@@ -928,7 +1252,7 @@ struct nand_chip {
u16 max_bb_per_die;
u32 blocks_per_die;
- struct nand_data_interface *data_interface;
+ struct nand_data_interface data_interface;
int read_retries;
@@ -938,7 +1262,6 @@ struct nand_chip {
struct nand_hw_control *controller;
struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
unsigned long buf_align;
struct nand_hw_control hwcontrol;
@@ -956,6 +1279,15 @@ struct nand_chip {
} manufacturer;
};
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ return chip->exec_op(chip, op, false);
+}
+
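A hedged sketch of core-side usage: building a reset operation from the instruction macros and handing it to nand_exec_op(). NAND_CMD_RESET (0xff) comes from this header; the timeout value is illustrative, whereas the real helpers derive it from the chip's timings:

static int example_reset(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(500, 0),	/* illustrative timeout in ms */
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return nand_exec_op(chip, &op);
}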
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode);
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip)
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-/* get data interface from ONFI timing mode 0, used after reset. */
-const struct nand_data_interface *nand_get_default_data_interface(void);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+
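As a usage sketch (example_identify() is hypothetical), vendor or core code could chain the helpers declared above like this:

static int example_identify(struct nand_chip *chip)
{
	u8 id[2], status;
	int ret;

	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = nand_readid_op(chip, 0, id, sizeof(id));
	if (ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* Leave status reading mode before issuing further commands. */
	return nand_exit_status_op(chip);
}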
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index d0c66a0975cf..de36969eb359 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -61,6 +61,7 @@
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -130,7 +131,10 @@
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
/* Flag Status Register bits */
-#define FSR_READY BIT(7)
+#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR BIT(5) /* Erase operation status */
+#define FSR_P_ERR BIT(4) /* Program operation status */
+#define FSR_PT_ERR BIT(1) /* Protection error bit */
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps {
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
+/**
+ * spi_nor_restore() - restore the status of SPI NOR
+ * @nor: the spi_nor structure
+ */
+void spi_nor_restore(struct spi_nor *nor);
+
#endif
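A minimal sketch, not from the patch, of how the new flag status bits might be interpreted once the FSR has been read; a real driver would also issue SPINOR_OP_CLFSR to clear latched error bits:

static int example_check_fsr(u8 fsr)
{
	if (!(fsr & FSR_READY))
		return -EBUSY;

	if (fsr & (FSR_E_ERR | FSR_P_ERR | FSR_PT_ERR))
		return -EIO;

	return 0;
}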
diff --git a/include/linux/net.h b/include/linux/net.h
index 68acc54976bf..91216b16feb7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,7 +147,7 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
- unsigned int (*poll) (struct file *file, struct socket *sock,
+ __poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 495ba4dd9da5..34551f8aaf9d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds the NFNL subsystem mutex.
+ * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 47adac640191..57ffaa20d564 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -457,7 +457,12 @@ enum lock_type4 {
#define NFS4_DEBUG 1
-/* Index of predefined Linux client operations */
+/*
+ * Index of predefined Linux client operations
+ *
+ * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
+ * append only to this enum when adding new client operations.
+ */
enum {
NFSPROC4_CLNT_NULL = 0, /* Unused */
@@ -480,7 +485,6 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
- NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
@@ -500,7 +504,6 @@ enum {
NFSPROC4_CLNT_SECINFO,
NFSPROC4_CLNT_FSID_PRESENT,
- /* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
NFSPROC4_CLNT_CREATE_SESSION,
NFSPROC4_CLNT_DESTROY_SESSION,
@@ -518,13 +521,14 @@ enum {
NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
NFSPROC4_CLNT_DESTROY_CLIENTID,
- /* nfs42 */
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+
+ NFSPROC4_CLNT_LOOKUPP,
};
/* nfs41 types */
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 11ce6b1117a8..6e8200215321 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -5,20 +5,36 @@
Originally written by Alan Cox.
Hacked to death by C. Scott Ananian and David Huggins-Daines.
-
- Some of the constants in here are from the corresponding
- NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
- rest of them on our own. */
+*/
+
#ifndef LINUX_NUBUS_H
#define LINUX_NUBUS_H
+#include <linux/device.h>
#include <asm/nubus.h>
#include <uapi/linux/nubus.h>
+struct proc_dir_entry;
+struct seq_file;
+
+struct nubus_dir {
+ unsigned char *base;
+ unsigned char *ptr;
+ int done;
+ int mask;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_dirent {
+ unsigned char *base;
+ unsigned char type;
+ __u32 data; /* Actually 24 bits used */
+ int mask;
+};
+
struct nubus_board {
- struct nubus_board* next;
- struct nubus_dev* first_dev;
-
+ struct device dev;
+
/* Only 9-E actually exist, though 0-8 are also theoretically
possible, and 0 is a special case which represents the
motherboard and onboard peripherals (Ethernet, video) */
@@ -27,10 +43,10 @@ struct nubus_board {
char name[64];
/* Format block */
- unsigned char* fblock;
+ unsigned char *fblock;
/* Root directory (does *not* always equal fblock + doffset!) */
- unsigned char* directory;
-
+ unsigned char *directory;
+
unsigned long slot_addr;
/* Offset to root directory (sometimes) */
unsigned long doffset;
@@ -41,15 +57,15 @@ struct nubus_board {
unsigned char rev;
unsigned char format;
unsigned char lanes;
-};
-struct nubus_dev {
- /* Next link in device list */
- struct nubus_dev* next;
/* Directory entry in /proc/bus/nubus */
- struct proc_dir_entry* procdir;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_rsrc {
+ struct list_head list;
- /* The functional resource ID of this device */
+ /* The functional resource ID */
unsigned char resid;
/* These are mostly here for convenience; we could always read
them from the ROMs if we wanted to */
@@ -57,79 +73,116 @@ struct nubus_dev {
unsigned short type;
unsigned short dr_sw;
unsigned short dr_hw;
- /* This is the device's name rather than the board's.
- Sometimes they are different. Usually the board name is
- more correct. */
- char name[64];
- /* MacOS driver (I kid you not) */
- unsigned char* driver;
- /* Actually this is an offset */
- unsigned long iobase;
- unsigned long iosize;
- unsigned char flags, hwdevid;
-
+
/* Functional directory */
- unsigned char* directory;
+ unsigned char *directory;
/* Much of our info comes from here */
- struct nubus_board* board;
+ struct nubus_board *board;
+};
+
+/* This is all NuBus functional resources (used to find devices later on) */
+extern struct list_head nubus_func_rsrcs;
+
+struct nubus_driver {
+ struct device_driver driver;
+ int (*probe)(struct nubus_board *board);
+ int (*remove)(struct nubus_board *board);
};
-/* This is all NuBus devices (used to find devices later on) */
-extern struct nubus_dev* nubus_devices;
-/* This is all NuBus cards */
-extern struct nubus_board* nubus_boards;
+extern struct bus_type nubus_bus_type;
/* Generic NuBus interface functions, modelled after the PCI interface */
-void nubus_scan_bus(void);
#ifdef CONFIG_PROC_FS
-extern void nubus_proc_init(void);
+void nubus_proc_init(void);
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board);
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size);
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent);
#else
static inline void nubus_proc_init(void) {}
+static inline
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
+{ return NULL; }
+static inline
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
+{ return NULL; }
+static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size) {}
+static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent) {}
#endif
-int get_nubus_list(char *buf);
-int nubus_proc_attach_device(struct nubus_dev *dev);
-/* If we need more precision we can add some more of these */
-struct nubus_dev* nubus_find_device(unsigned short category,
- unsigned short type,
- unsigned short dr_hw,
- unsigned short dr_sw,
- const struct nubus_dev* from);
-struct nubus_dev* nubus_find_type(unsigned short category,
- unsigned short type,
- const struct nubus_dev* from);
-/* Might have more than one device in a slot, you know... */
-struct nubus_dev* nubus_find_slot(unsigned int slot,
- const struct nubus_dev* from);
+
+struct nubus_rsrc *nubus_first_rsrc_or_null(void);
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
+
+#define for_each_func_rsrc(f) \
+ for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
+
+#define for_each_board_func_rsrc(b, f) \
+ for_each_func_rsrc(f) if (f->board != b) {} else
/* These are somewhat more NuBus-specific. They all return 0 for
success and -1 for failure, as you'd expect. */
/* The root directory which contains the board and functional
directories */
-int nubus_get_root_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_root_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The board directory */
-int nubus_get_board_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_board_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The functional directory */
-int nubus_get_func_dir(const struct nubus_dev* dev,
- struct nubus_dir* dir);
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
/* These work on any directory gotten via the above */
-int nubus_readdir(struct nubus_dir* dir,
- struct nubus_dirent* ent);
-int nubus_find_rsrc(struct nubus_dir* dir,
+int nubus_readdir(struct nubus_dir *dir,
+ struct nubus_dirent *ent);
+int nubus_find_rsrc(struct nubus_dir *dir,
unsigned char rsrc_type,
- struct nubus_dirent* ent);
-int nubus_rewinddir(struct nubus_dir* dir);
+ struct nubus_dirent *ent);
+int nubus_rewinddir(struct nubus_dir *dir);
/* Things to do with directory entries */
-int nubus_get_subdir(const struct nubus_dirent* ent,
- struct nubus_dir* dir);
-void nubus_get_rsrc_mem(void* dest,
- const struct nubus_dirent *dirent,
- int len);
-void nubus_get_rsrc_str(void* dest,
- const struct nubus_dirent *dirent,
- int maxlen);
+int nubus_get_subdir(const struct nubus_dirent *ent,
+ struct nubus_dir *dir);
+void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
+
+/* Declarations relating to driver model objects */
+int nubus_bus_register(void);
+int nubus_device_register(struct nubus_board *board);
+int nubus_driver_register(struct nubus_driver *ndrv);
+void nubus_driver_unregister(struct nubus_driver *ndrv);
+int nubus_proc_show(struct seq_file *m, void *data);
+
+static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
+{
+ dev_set_drvdata(&board->dev, data);
+}
+
+static inline void *nubus_get_drvdata(struct nubus_board *board)
+{
+ return dev_get_drvdata(&board->dev);
+}
+
+/* Returns a pointer to the "standard" slot space. */
+static inline void *nubus_slot_addr(int slot)
+{
+ return (void *)(0xF0000000 | (slot << 24));
+}
+
#endif /* LINUX_NUBUS_H */
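For illustration, a minimal NuBus driver built on the new driver-model objects could look roughly like this (the example_* names are assumptions, not from the patch):

static int example_probe(struct nubus_board *board)
{
	struct nubus_rsrc *fres;

	for_each_board_func_rsrc(board, fres)
		dev_info(&board->dev, "functional resource 0x%02x\n",
			 fres->resid);

	return 0;
}

static int example_remove(struct nubus_board *board)
{
	return 0;
}

static struct nubus_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "nubus-example",
	},
};

/* Registered with nubus_driver_register(&example_driver) from module init. */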
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index aea87f0d917b..4112e2bd747f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -124,14 +124,20 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
-#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
-#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
-
-#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
-#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
-#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
-#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
-#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
+enum {
+ NVME_CMBSZ_SQS = 1 << 0,
+ NVME_CMBSZ_CQS = 1 << 1,
+ NVME_CMBSZ_LISTS = 1 << 2,
+ NVME_CMBSZ_RDS = 1 << 3,
+ NVME_CMBSZ_WDS = 1 << 4,
+
+ NVME_CMBSZ_SZ_SHIFT = 12,
+ NVME_CMBSZ_SZ_MASK = 0xfffff,
+
+ NVME_CMBSZ_SZU_SHIFT = 8,
+ NVME_CMBSZ_SZU_MASK = 0xf,
+};
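A hedged sketch of how these fields combine into a CMB size in bytes; per the NVMe specification the SZU field selects a granularity of 4 KiB * 16^SZU:

static u64 example_cmb_size(u32 cmbsz)
{
	u8 szu = (cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
	u64 sz = (cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;

	/* Size unit: 4 KiB * 16^SZU, i.e. 1 << (12 + 4 * SZU) bytes. */
	return sz * (1ULL << (12 + 4 * szu));
}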
/*
* Submission and Completion Queue Entry Sizes for the NVM command set.
diff --git a/include/linux/of.h b/include/linux/of.h
index d3dea1d1e3a9..173102dafb07 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -544,6 +544,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
+extern int of_cpu_node_to_id(struct device_node *np);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -916,6 +918,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
+static inline int of_cpu_node_to_id(struct device_node *np)
+{
+ return -ENODEV;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 1fe205582111..18a7f03e1182 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -31,7 +31,7 @@ enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8,
+ OF_GPIO_TRANSITORY = 0x8,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index edfa280c3d56..053feb41510a 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
struct gpmc_nand_regs;
+struct gpmc_onenand_info {
+ bool sync_read;
+ bool sync_write;
+ int burst_len;
+};
+
#if IS_ENABLED(CONFIG_OMAP_GPMC)
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
+/**
+ * gpmc_omap_onenand_set_timings - set optimized sync timings.
+ * @cs: Chip Select Region
+ * @freq: Chip frequency
+ * @latency: Burst latency cycle count
+ * @info: Structure describing parameters used
+ *
+ * Sets optimized timings for the @cs region based on @freq and @latency.
+ * Updates the @info structure based on the GPMC settings.
+ */
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info);
+
#else
static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs)
{
return NULL;
}
+
+static inline
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_OMAP_GPMC */
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
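As a usage sketch (example_setup_sync() and the parameter values are assumptions), probing code could apply the sync timings and consume the reported settings like this:

static int example_setup_sync(struct device *dev, int cs, int freq, int latency)
{
	struct gpmc_onenand_info info;
	int ret;

	ret = gpmc_omap_onenand_set_timings(dev, cs, freq, latency, &info);
	if (ret)
		return ret;

	if (info.sync_read || info.sync_write)
		dev_info(dev, "sync burst length: %d\n", info.burst_len);

	return 0;
}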
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6658d9ee5257..864d167a1073 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
+ *
+ * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
- /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
- smp_read_barrier_depends();
-
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 73a7bf30fe9a..4f052496cdfd 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
return 0;
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return (fbc->counters != NULL);
}
@@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SMP */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index ec6dadcc1fde..6c0680641108 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -94,6 +94,7 @@
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -122,6 +123,7 @@ enum pin_config_param {
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2006 Nokia Corporation
- * Author: Juha Yrjola
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MTD_ONENAND_OMAP2_H
-#define __MTD_ONENAND_OMAP2_H
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
-#define ONENAND_SYNC_READ (1 << 0)
-#define ONENAND_SYNC_READWRITE (1 << 1)
-#define ONENAND_IN_OMAP34XX (1 << 2)
-
-struct omap_onenand_platform_data {
- int cs;
- int gpio_irq;
- struct mtd_partition *parts;
- int nr_parts;
- int (*onenand_setup)(void __iomem *, int *freq_ptr);
- int dma_channel;
- u8 flags;
- u8 regulator_can_sleep;
- u8 skip_initial_unlocking;
-
- /* for passing the partitions */
- struct device_node *of_node;
-};
-#endif
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index da79774078a7..773daf7915a3 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SPI_S3C64XX_H
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 492ed473ba7e..e723b78d8357 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -556,9 +556,10 @@ struct pm_subsys_data {
* These flags can be set by device drivers at the probe time. They need not be
* cleared by the drivers as the driver core will take care of that.
*
- * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
* SMART_PREPARE: Check the return value of the driver's ->prepare callback.
* SMART_SUSPEND: No need to resume the device from runtime suspend.
+ * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
*
* Setting SMART_PREPARE instructs bus types and PM domains which may want
* system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
* necessary from the driver's perspective. It also may cause them to skip
* invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
* the driver if they decide to leave the device in runtime suspend.
+ *
+ * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
+ * driver prefers the device to be left in suspend after system resume.
*/
-#define DPM_FLAG_NEVER_SKIP BIT(0)
-#define DPM_FLAG_SMART_PREPARE BIT(1)
-#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_NEVER_SKIP BIT(0)
+#define DPM_FLAG_SMART_PREPARE BIT(1)
+#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_LEAVE_SUSPENDED BIT(3)
struct dev_pm_info {
pm_message_t power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ unsigned int must_resume:1; /* Owned by the PM core */
+ unsigned int may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
#endif
@@ -766,6 +773,7 @@ extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void dev_pm_skip_next_resume_phases(struct device *dev);
+extern bool dev_pm_may_skip_resume(struct device *dev);
extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 4c2cba7ec1d4..4238dde0aaf0 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -88,6 +88,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev)
+{
+ dev->power.wakeup_path = true;
+}
+
/* drivers/base/power/wakeup.c */
extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
@@ -174,6 +179,8 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && dev->power.should_wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev) {}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d384f12abdd5..04781a753326 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -37,7 +37,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
- unsigned long _key;
+ __poll_t _key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -62,20 +62,20 @@ static inline bool poll_does_not_wait(const poll_table *p)
* to be started implicitly on poll(). You typically only want to do that
* if the application is actually polling for POLLIN and/or POLLOUT.
*/
-static inline unsigned long poll_requested_events(const poll_table *p)
+static inline __poll_t poll_requested_events(const poll_table *p)
{
- return p ? p->_key : ~0UL;
+ return p ? p->_key : ~(__poll_t)0;
}
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->_qproc = qproc;
- pt->_key = ~0UL; /* all events enabled */
+ pt->_key = ~(__poll_t)0; /* all events enabled */
}
struct poll_table_entry {
struct file *filp;
- unsigned long key;
+ __poll_t key;
wait_queue_entry_t wait;
wait_queue_head_t *wait_address;
};
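For illustration, a driver ->poll() method written against the __poll_t types might look like this; example_waitqueue and example_data_ready() are hypothetical placeholders:

static DECLARE_WAIT_QUEUE_HEAD(example_waitqueue);
static bool example_data_ready(void);

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &example_waitqueue, wait);

	if (example_data_ready())
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}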
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225510f1..3a3bc71017d5 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -68,7 +68,7 @@ struct posix_clock_operations {
int (*open) (struct posix_clock *pc, fmode_t f_mode);
- uint (*poll) (struct posix_clock *pc,
+ __poll_t (*poll) (struct posix_clock *pc,
struct file *file, poll_table *wait);
int (*release) (struct posix_clock *pc);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 672c4f32311e..c85704fcdbd2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -42,13 +42,26 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
- ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
-#define MAKE_THREAD_CPUCLOCK(tid, clock) \
- MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+static inline clockid_t make_process_cpuclock(const unsigned int pid,
+ const clockid_t clock)
+{
+ return ((~pid) << 3) | clock;
+}
+static inline clockid_t make_thread_cpuclock(const unsigned int tid,
+ const clockid_t clock)
+{
+ return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
+}
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
+static inline clockid_t fd_to_clockid(const int fd)
+{
+ return make_process_cpuclock((unsigned int) fd, CLOCKFD);
+}
+
+static inline int clockid_to_fd(const clockid_t clk)
+{
+ return ~(clk >> 3);
+}
#define REQUEUE_PENDING 1
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index b2b7255ec7f5..540595a321a7 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
struct posix_acl_entry {
@@ -24,7 +25,7 @@ struct posix_acl_entry {
};
struct posix_acl {
- atomic_t a_refcount;
+ refcount_t a_refcount;
struct rcu_head a_rcu;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
@@ -41,7 +42,7 @@ static inline struct posix_acl *
posix_acl_dup(struct posix_acl *acl)
{
if (acl)
- atomic_inc(&acl->a_refcount);
+ refcount_inc(&acl->a_refcount);
return acl;
}
@@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl)
static inline void
posix_acl_release(struct posix_acl *acl)
{
- if (acl && atomic_dec_and_test(&acl->a_refcount))
+ if (acl && refcount_dec_and_test(&acl->a_refcount))
kfree_rcu(acl, a_rcu);
}
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e6187f524f2c..01fbf1b16258 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -16,6 +16,7 @@ enum bq27xxx_chip {
BQ27520G2, /* bq27520G2 */
BQ27520G3, /* bq27520G3 */
BQ27520G4, /* bq27520G4 */
+ BQ27521, /* bq27521 */
BQ27530, /* bq27530, bq27531 */
BQ27531,
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
diff --git a/include/linux/property.h b/include/linux/property.h
index 5b0563ad79a5..1106bc62dd8c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -283,6 +283,8 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
+void *device_get_match_data(struct device *dev);
+
int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..f724fd8c78e8 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
struct psci_operations {
+ u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -46,10 +47,11 @@ static inline int psci_dt_init(void) { return 0; }
#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
int __init psci_acpi_init(void);
bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
+bool acpi_psci_use_hvc(void);
#else
static inline int psci_acpi_init(void) { return 0; }
static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) { return false; }
#endif
#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a6ddc42f87a5..043d04784675 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { }
#define cond_resched_rcu_qs() \
do { \
if (!cond_resched()) \
- rcu_note_voluntary_context_switch(current); \
+ rcu_note_voluntary_context_switch_lite(current); \
} while (0)
/*
@@ -433,12 +433,12 @@ static inline void rcu_preempt_sleep_check(void) { }
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. Although rcu_access_pointer() may also be used in cases where
- * update-side locks prevent the value of the pointer from changing, you
- * should instead use rcu_dereference_protected() for this use case.
+ * lockdep checks for being in an RCU read-side critical section. This is
+ * useful when the value of this pointer is accessed, but the pointer is
+ * not dereferenced, for example, when testing an RCU-protected pointer
+ * against NULL. Although rcu_access_pointer() may also be used in cases
+ * where update-side locks prevent the value of the pointer from changing,
+ * you should instead use rcu_dereference_protected() for this use case.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as
@@ -521,12 +521,11 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(). This
- * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does *not*
- * prevent the compiler from repeating this reference or combining it
- * with other references, so it should not be used without protection
- * of appropriate locks.
+ * the READ_ONCE(). This is useful in cases where update-side locks
+ * prevent the value of the pointer from changing. Please note that this
+ * primitive does *not* prevent the compiler from repeating this reference
+ * or combining it with other references, so it should not be used without
+ * protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b3dbf9502fd0..ce9beec35e34 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..fd996cdf1833 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
void exit_rcu(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 15eddc1353ba..20268b7d5001 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -30,6 +30,7 @@ struct regmap;
struct regmap_range_cfg;
struct regmap_field;
struct snd_ac97;
+struct sdw_slave;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -264,6 +265,9 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
* @unlock: As above for unlocking.
@@ -296,7 +300,10 @@ typedef void (*regmap_unlock)(void *);
* a read.
* @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
- * empty the regmap_bus default masks are used.
+ * empty and zero_flag_mask is not set the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
* @use_single_rw: If set, converts the bulk read and write operations into
* a series of single read and write operations. This is useful
* for device that does not support bulk read and write.
@@ -317,6 +324,7 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
@@ -333,6 +341,8 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg;
@@ -355,6 +365,7 @@ struct regmap_config {
unsigned long read_flag_mask;
unsigned long write_flag_mask;
+ bool zero_flag_mask;
bool use_single_rw;
bool can_multi_write;
@@ -365,6 +376,7 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+ bool use_hwlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
};
@@ -524,6 +536,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -561,6 +577,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -710,6 +730,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+
+/**
* devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
@@ -839,6 +873,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
ac97, config)
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
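A hedged sketch of a SoundWire slave creating its register map with the new helper; the register sizes and example_* names are illustrative, not from the patch:

static const struct regmap_config example_sdw_regmap_config = {
	.reg_bits = 32,
	.val_bits = 8,
	.max_register = 0x2fff,		/* illustrative */
	.cache_type = REGCACHE_NONE,
};

static int example_init_regmap(struct sdw_slave *slave)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_sdw(slave, &example_sdw_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}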
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 94417b4226bd..4c00486b7a78 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -214,6 +214,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+ int (*resume_early)(struct regulator_dev *rdev);
+
int (*set_pull_down) (struct regulator_dev *);
};
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 9cd4fef37203..93a04893c739 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,16 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/*
+ * operations in suspend mode
+ * DO_NOTHING_IN_SUSPEND - the default value
+ * DISABLE_IN_SUSPEND - turn off regulator in suspend states
+ * ENABLE_IN_SUSPEND - keep regulator on in suspend states
+ */
+#define DO_NOTHING_IN_SUSPEND (-1)
+#define DISABLE_IN_SUSPEND 0
+#define ENABLE_IN_SUSPEND 1
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +66,24 @@ enum regulator_active_discharge {
* state. One of enabled or disabled must be set for the
* configuration to be applied.
*
- * @uV: Operating voltage during suspend.
+ * @uV: Default operating voltage during suspend; it can be adjusted
+ * within the <min_uV, max_uV> range.
+ * @min_uV: Minimum voltage that may be set during suspend.
+ * @max_uV: Maximum voltage that may be set during suspend.
* @mode: Operating mode during suspend.
- * @enabled: Enabled during suspend.
- * @disabled: Disabled during suspend.
+ * @enabled: Operation during suspend. One of:
+ * - DO_NOTHING_IN_SUSPEND
+ * - DISABLE_IN_SUSPEND
+ * - ENABLE_IN_SUSPEND
+ * @changeable: Whether this state can be switched between enabled and disabled.
*/
struct regulator_state {
- int uV; /* suspend voltage */
- unsigned int mode; /* suspend regulator operating mode */
- int enabled; /* is regulator enabled in this suspend state */
- int disabled; /* is the regulator disabled in this suspend state */
+ int uV;
+ int min_uV;
+ int max_uV;
+ unsigned int mode;
+ int enabled;
+ bool changeable;
};
/**
@@ -225,12 +243,12 @@ struct regulator_init_data {
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+#endif
+
static inline int regulator_suspend_prepare(suspend_state_t state)
{
return 0;
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void)
{
return 0;
}
-#endif
#endif
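As a board-file sketch (the rail name and voltages are illustrative, not from the patch), the new suspend-state fields could be filled in like this:

static struct regulator_init_data example_init_data = {
	.constraints = {
		.name = "vdd-example",
		.state_mem = {
			.uV = 1800000,
			.min_uV = 1700000,
			.max_uV = 1900000,
			.enabled = ENABLE_IN_SUSPEND,
			.changeable = true,
		},
	},
};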
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 289e4d54e3e0..7d9eb39fa76a 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -96,7 +96,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
})
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 10d6ae8bbb7d..ca07366c4c33 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
#else
@@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept,
+static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
/* This shouldn't be possible */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 0514cc36ac34..1fdcde96eb65 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -71,8 +71,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds RTNL.
+ * the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h
index 443176ee1ab0..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/rtsx_common.h
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h
index c3d3f04d8cc6..478acf6efac6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -24,7 +24,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/mfd/rtsx_common.h>
+#include <linux/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -203,6 +203,7 @@
#define SD_DDR_MODE 0x04
#define SD_30_MODE 0x08
#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_MODE_SELECT_MASK 0x0C
#define SD_CFG2 0xFDA1
#define SD_CALCULATE_CRC7 0x00
#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +227,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD30_CLK_END_EN 0x10
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
#define SD_STAT1 0xFDA3
@@ -309,6 +311,12 @@
#define SD_DATA_STATE 0xFDB6
#define SD_DATA_IDLE 0x80
+#define REG_SD_STOP_SDCLK_CFG 0xFDB8
+#define SD30_CLK_STOP_CFG_EN 0x04
+#define SD30_CLK_STOP_CFG1 0x02
+#define SD30_CLK_STOP_CFG0 0x01
+#define REG_PRE_RW_MODE 0xFD70
+#define EN_INFINITE_MODE 0x01
#define SRCTL 0xFC13
@@ -434,6 +442,7 @@
#define CARD_CLK_EN 0xFD69
#define SD_CLK_EN 0x04
#define MS_CLK_EN 0x08
+#define SD40_CLK_EN 0x10
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
#define CD_DISABLE_MASK 0x07
@@ -453,8 +462,8 @@
#define FPDCTL 0xFC00
#define SSC_POWER_DOWN 0x01
#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
+#define ALL_POWER_DOWN 0x03
+#define OC_POWER_DOWN 0x02
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
@@ -490,6 +499,9 @@
#define FPGA_PULL_CTL 0xFC1D
#define OLT_LED_CTL 0xFC1E
+#define LED_SHINE_MASK 0x08
+#define LED_SHINE_EN 0x08
+#define LED_SHINE_DISABLE 0x00
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
@@ -511,7 +523,11 @@
#define BPP_LDO_ON 0x00
#define BPP_LDO_SUSPEND 0x02
#define BPP_LDO_OFF 0x03
+#define EFUSE_CTL 0xFC30
+#define EFUSE_ADD 0xFC31
#define SYS_VER 0xFC32
+#define EFUSE_DATAL 0xFC34
+#define EFUSE_DATAH 0xFC35
#define CARD_PULL_CTL1 0xFD60
#define CARD_PULL_CTL2 0xFD61
@@ -553,6 +569,9 @@
#define RBBC1 0xFE2F
#define RBDAT 0xFE30
#define RBCTL 0xFE34
+#define U_AUTO_DMA_EN_MASK 0x20
+#define U_AUTO_DMA_DISABLE 0x00
+#define RB_FLUSH 0x80
#define CFGADDR0 0xFE35
#define CFGADDR1 0xFE36
#define CFGDATA0 0xFE37
@@ -581,6 +600,8 @@
#define LTR_LATENCY_MODE_HW 0
#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
+#define OBFF_EN_MASK 0x03
+#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -595,6 +616,7 @@
#define FORCE_ASPM_L0_EN 0x01
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
+#define CLK_PM_EN 0x01
#define FUNC_FORCE_CTL 0xFE59
#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -620,14 +642,23 @@
#define LDO_PWR_SEL 0xFE78
#define L1SUB_CONFIG1 0xFE8D
+#define AUX_CLK_ACTIVE_SEL_MASK 0x01
+#define MAC_CKSW_DONE 0x00
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
+#define IC_VERSION_MASK 0x0F
+#define REG_VREF 0xFE97
+#define PWD_SUSPND_EN 0x10
+#define RTS5260_DMA_RST_CTL_0 0xFEBF
+#define RTS5260_DMA_RST 0x80
+#define RTS5260_ADMA3_RST 0x40
#define AUTOLOAD_CFG_BASE 0xFF00
+#define RELINK_TIME_MASK 0x01
#define PETXCFG 0xFF03
#define FORCE_CLKREQ_DELINK_MASK BIT(7)
#define FORCE_CLKREQ_LOW 0x80
@@ -667,15 +698,24 @@
#define LDO_DV18_CFG 0xFF70
#define LDO_DV18_SR_MASK 0xC0
#define LDO_DV18_SR_DF 0x40
+#define DV331812_MASK 0x70
+#define DV331812_33 0x70
+#define DV331812_17 0x30
#define LDO_CONFIG2 0xFF71
#define LDO_D3318_MASK 0x07
#define LDO_D3318_33V 0x07
#define LDO_D3318_18V 0x02
+#define DV331812_VDD1 0x04
+#define DV331812_POWERON 0x08
+#define DV331812_POWEROFF 0x00
#define LDO_VCC_CFG0 0xFF72
#define LDO_VCC_LMTVTH_MASK 0x30
#define LDO_VCC_LMTVTH_2A 0x10
+/*RTS5260*/
+#define RTS5260_DVCC_TUNE_MASK 0x70
+#define RTS5260_DVCC_33 0x70
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -684,6 +724,10 @@
#define LDO_VCC_1V8 0x04
#define LDO_VCC_3V3 0x07
#define LDO_VCC_LMT_EN 0x08
+/*RTS5260*/
+#define LDO_POW_SDVDD1_MASK 0x08
+#define LDO_POW_SDVDD1_ON 0x08
+#define LDO_POW_SDVDD1_OFF 0x00
#define LDO_VIO_CFG 0xFF75
#define LDO_VIO_SR_MASK 0xC0
@@ -711,6 +755,160 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5260_AUTOLOAD_CFG4 0xFF7F
+#define RTS5260_MIMO_DISABLE 0x8A
+
+#define RTS5260_REG_GPIO_CTL0 0xFC1A
+#define RTS5260_REG_GPIO_MASK 0x01
+#define RTS5260_REG_GPIO_ON 0x01
+#define RTS5260_REG_GPIO_OFF 0x00
+
+#define PWR_GLOBAL_CTRL 0xF200
+#define PCIE_L1_2_EN 0x0C
+#define PCIE_L1_1_EN 0x0A
+#define PCIE_L1_0_EN 0x09
+#define PWR_FE_CTL 0xF201
+#define PCIE_L1_2_PD_FE_EN 0x0C
+#define PCIE_L1_1_PD_FE_EN 0x0A
+#define PCIE_L1_0_PD_FE_EN 0x09
+#define CFG_PCIE_APHY_OFF_0 0xF204
+#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
+#define CFG_PCIE_APHY_OFF_1 0xF205
+#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
+#define CFG_PCIE_APHY_OFF_2 0xF206
+#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
+#define CFG_PCIE_APHY_OFF_3 0xF207
+#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
+#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
+#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
+#define CFG_L1_0_SYS_RET_VALUE 0xF210
+#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
+#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
+#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
+#define CFG_LP_FPWM_VALUE 0xF219
+#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
+#define PWC_CDR 0xF253
+#define PWC_CDR_DEFAULT 0x03
+#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
+#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
+
+/* OCPCTL */
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define SDVIO_DETECT_EN (1 << 7)
+#define SDVIO_OCP_INT_EN (1 << 6)
+#define SDVIO_OCP_INT_CLR (1 << 5)
+#define SDVIO_OC_CLR (1 << 4)
+
+/* OCPSTAT */
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define SDVIO_OC_NOW (1 << 6)
+#define SDVIO_OC_EVER (1 << 5)
+
+#define REG_OCPCTL 0xFD6A
+#define REG_OCPSTAT 0xFD6E
+#define REG_OCPGLITCH 0xFD6C
+#define REG_OCPPARA1 0xFD6B
+#define REG_OCPPARA2 0xFD6D
+
+/* rts5260 DV3318 OCP-related registers */
+#define REG_DV3318_OCPCTL 0xFD89
+#define DV3318_OCP_TIME_MASK 0xF0
+#define DV3318_DETECT_EN 0x08
+#define DV3318_OCP_INT_EN 0x04
+#define DV3318_OCP_INT_CLR 0x02
+#define DV3318_OCP_CLR 0x01
+
+#define REG_DV3318_OCPSTAT 0xFD8A
+#define DV3318_OCP_GlITCH_TIME_MASK 0xF0
+#define DV3318_OCP_DETECT 0x08
+#define DV3318_OCP_NOW 0x04
+#define DV3318_OCP_EVER 0x02
+
+#define SD_OCP_GLITCH_MASK 0x0F
+
+/* OCPPARA1 */
+#define SDVIO_OCP_TIME_60 0x00
+#define SDVIO_OCP_TIME_100 0x10
+#define SDVIO_OCP_TIME_200 0x20
+#define SDVIO_OCP_TIME_400 0x30
+#define SDVIO_OCP_TIME_600 0x40
+#define SDVIO_OCP_TIME_800 0x50
+#define SDVIO_OCP_TIME_1100 0x60
+#define SDVIO_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+/* OCPPARA2 */
+#define SDVIO_OCP_THD_190 0x00
+#define SDVIO_OCP_THD_250 0x10
+#define SDVIO_OCP_THD_320 0x20
+#define SDVIO_OCP_THD_380 0x30
+#define SDVIO_OCP_THD_440 0x40
+#define SDVIO_OCP_THD_500 0x50
+#define SDVIO_OCP_THD_570 0x60
+#define SDVIO_OCP_THD_630 0x70
+#define SDVIO_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define SDVIO_OCP_GLITCH_MASK 0xF0
+#define SDVIO_OCP_GLITCH_NONE 0x00
+#define SDVIO_OCP_GLITCH_50U 0x10
+#define SDVIO_OCP_GLITCH_100U 0x20
+#define SDVIO_OCP_GLITCH_200U 0x30
+#define SDVIO_OCP_GLITCH_600U 0x40
+#define SDVIO_OCP_GLITCH_800U 0x50
+#define SDVIO_OCP_GLITCH_1M 0x60
+#define SDVIO_OCP_GLITCH_2M 0x70
+#define SDVIO_OCP_GLITCH_3M 0x80
+#define SDVIO_OCP_GLITCH_4M 0x90
+#define SDVIO_OCP_GLIVCH_5M 0xA0
+#define SDVIO_OCP_GLITCH_6M 0xB0
+#define SDVIO_OCP_GLITCH_7M 0xC0
+#define SDVIO_OCP_GLITCH_8M 0xD0
+#define SDVIO_OCP_GLITCH_9M 0xE0
+#define SDVIO_OCP_GLITCH_10M 0xF0
+
+#define SD_OCP_GLITCH_MASK 0x0F
+#define SD_OCP_GLITCH_NONE 0x00
+#define SD_OCP_GLITCH_50U 0x01
+#define SD_OCP_GLITCH_100U 0x02
+#define SD_OCP_GLITCH_200U 0x03
+#define SD_OCP_GLITCH_600U 0x04
+#define SD_OCP_GLITCH_800U 0x05
+#define SD_OCP_GLITCH_1M 0x06
+#define SD_OCP_GLITCH_2M 0x07
+#define SD_OCP_GLITCH_3M 0x08
+#define SD_OCP_GLITCH_4M 0x09
+#define SD_OCP_GLIVCH_5M 0x0A
+#define SD_OCP_GLITCH_6M 0x0B
+#define SD_OCP_GLITCH_7M 0x0C
+#define SD_OCP_GLITCH_8M 0x0D
+#define SD_OCP_GLITCH_9M 0x0E
+#define SD_OCP_GLITCH_10M 0x0F
+
/* Phy register */
#define PHY_PCR 0x00
#define PHY_PCR_FORCE_CODE 0xB000
@@ -857,6 +1055,7 @@
#define PCR_ASPM_SETTING_REG1 0x160
#define PCR_ASPM_SETTING_REG2 0x168
+#define PCR_ASPM_SETTING_5260 0x178
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
@@ -890,6 +1089,7 @@ struct pcr_ops {
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
@@ -897,6 +1097,12 @@ struct pcr_ops {
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
void (*full_on)(struct rtsx_pcr *pcr);
void (*power_saving)(struct rtsx_pcr *pcr);
+ void (*enable_ocp)(struct rtsx_pcr *pcr);
+ void (*disable_ocp)(struct rtsx_pcr *pcr);
+ void (*init_ocp)(struct rtsx_pcr *pcr);
+ void (*process_ocp)(struct rtsx_pcr *pcr);
+ int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
+ void (*clear_ocpstat)(struct rtsx_pcr *pcr);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -935,6 +1141,9 @@ enum dev_aspm_mode {
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
+ * @ocp_en: OCP (over-current protection) enable flag
+ * @sd_400mA_ocp_thd: 400mA OCP threshold
+ * @sd_800mA_ocp_thd: 800mA OCP threshold
*/
struct rtsx_cr_option {
u32 dev_flags;
@@ -949,6 +1158,19 @@ struct rtsx_cr_option {
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
+ bool ocp_en;
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+};
+
+/*
+ * struct rtsx_hw_param - card reader hardware parameters
+ * @interrupt_en: indicates which interrupts are enabled
+ * @ocp_glitch: OCP glitch time
+ */
+struct rtsx_hw_param {
+ u32 interrupt_en;
+ u8 ocp_glitch;
};
#define rtsx_set_dev_flag(cr, flag) \
@@ -963,6 +1185,7 @@ struct rtsx_pcr {
unsigned int id;
int pcie_cap;
struct rtsx_cr_option option;
+ struct rtsx_hw_param hw_param;
/* pci resources */
unsigned long addr;
@@ -1042,12 +1265,15 @@ struct rtsx_pcr {
struct rtsx_slot *slots;
u8 dma_error_count;
+ u8 ocp_stat;
+ u8 ocp_stat2;
};
#define PID_524A 0x524A
-#define PID_5249 0x5249
-#define PID_5250 0x5250
+#define PID_5249 0x5249
+#define PID_5250 0x5250
#define PID_525A 0x525A
+#define PID_5260 0x5260
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h
index c446e4fd6b5c..c446e4fd6b5c 100644
--- a/include/linux/mfd/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b7c83254c566..22b2131bcdcd 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -276,6 +276,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask);
+#ifdef CONFIG_SGL_ALLOC
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p);
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p);
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
+void sgl_free_order(struct scatterlist *sgl, int order);
+void sgl_free(struct scatterlist *sgl);
+#endif /* CONFIG_SGL_ALLOC */
+
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
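A minimal usage sketch for the new helpers (assuming CONFIG_SGL_ALLOC=y and a hypothetical driver context; dev, len and dir are assumed parameters), showing the intended alloc/free pairing:

static int example_build_sgl(struct device *dev, unsigned long long len,
			     enum dma_data_direction dir)
{
	unsigned int nents;
	struct scatterlist *sgl;

	sgl = sgl_alloc(len, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;
	/* e.g. dma_map_sg(dev, sgl, nents, dir), submit the I/O, unmap ... */
	sgl_free(sgl);
	return 0;
}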
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2588263a989..166144c04ef6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -472,11 +472,15 @@ struct sched_dl_entity {
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
+ *
+ * @dl_overrun tells if the task asked to be informed about runtime
+ * overruns.
*/
unsigned int dl_throttled : 1;
unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
+ unsigned int dl_overrun : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -1427,6 +1431,7 @@ extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
+extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
@@ -1446,12 +1451,21 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ struct task_struct task;
+#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+extern struct thread_info init_thread_info;
+#endif
+
+extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index d1ad3d825561..0b55834efd46 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -12,8 +12,6 @@
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
-#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
-
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0aa4548fb492..23b4f9cb82db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -285,6 +285,34 @@ static inline void kernel_signal_stop(void)
schedule();
}
+#ifdef __ARCH_SI_TRAPNO
+# define ___ARCH_SI_TRAPNO(_a1) , _a1
+#else
+# define ___ARCH_SI_TRAPNO(_a1)
+#endif
+#ifdef __ia64__
+# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
+#else
+# define ___ARCH_SI_IA64(_a1, _a2, _a3)
+#endif
+
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+
+int force_sig_mceerr(int code, void __user *, short, struct task_struct *);
+int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
+
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
+int force_sig_pkuerr(void __user *addr, u32 pkey);
+
+int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
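A hedged example of the new fault-signal helpers (assuming an architecture that defines neither __ARCH_SI_TRAPNO nor __ia64__, so the optional arguments compile away; example_report_fault and fault_addr are assumed names):

static void example_report_fault(unsigned long fault_addr)
{
	/* deliver SIGSEGV for an unmapped user address to the current task */
	force_sig_fault(SIGSEGV, SEGV_MAPERR,
			(void __user *)fault_addr, current);
}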
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index cb4828aaa34f..6a841929073f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {}
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
-static inline int object_is_on_stack(void *obj)
+static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cf257c2e728d..26347741ba50 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -7,6 +7,12 @@
#include <linux/sched/idle.h>
/*
+ * Increase resolution of cpu_capacity calculations
+ */
+#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
+#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+
+/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
@@ -27,12 +33,6 @@
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
-/*
- * Increase resolution of cpu_capacity calculations
- */
-#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
-#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
-
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
diff --git a/include/linux/scif.h b/include/linux/scif.h
index 49a35d6edc94..7046111b8d0a 100644
--- a/include/linux/scif.h
+++ b/include/linux/scif.h
@@ -123,8 +123,8 @@ struct scif_range {
*/
struct scif_pollepd {
scif_epd_t epd;
- short events;
- short revents;
+ __poll_t events;
+ __poll_t revents;
};
/**
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 10f25f7e4304..c723a5c4e3ff 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
extern long seccomp_get_filter(struct task_struct *task,
unsigned long filter_off, void __user *data);
+extern long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
#else
static inline long seccomp_get_filter(struct task_struct *task,
unsigned long n, void __user *data)
{
return -EINVAL;
}
+static inline long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off,
+ void __user *data)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
#endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f189a8a3bbb8..bcf4cf26b8c8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- int seq = READ_ONCE(s->sequence);
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- smp_read_barrier_depends();
+ int seq = READ_ONCE(s->sequence); /* ^^^ */
return seq;
}
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index a73d87b483b4..531031a15cdd 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -200,6 +200,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
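A sketch of the device-managed open added above (hypothetical client driver; demo_probe and the baud rate are assumptions): the open is undone automatically on probe failure or device removal, so no explicit serdev_device_close() is needed.

static int demo_probe(struct serdev_device *serdev)
{
	int ret;

	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	return 0;
}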
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 042968dd98f0..a9bc7e1b077e 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,13 +11,14 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+ memcpy(to, from, sizeof(*to));
+}
+
+static inline void clear_siginfo(struct siginfo *info)
+{
+ memset(info, 0, sizeof(*info));
}
int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
@@ -29,9 +30,7 @@ enum siginfo_layout {
SIL_FAULT,
SIL_CHLD,
SIL_RT,
-#ifdef __ARCH_SIGSYS
SIL_SYS,
-#endif
};
enum siginfo_layout siginfo_layout(int sig, int si_code);
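A minimal sketch (not from this diff; example_fill_sigchld is an assumed name) of the new clear_siginfo() helper: zero the whole siginfo, padding included, before filling the fields for a given layout.

static void example_fill_sigchld(struct siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = SIGCHLD;
	info->si_code  = CLD_EXITED;
}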
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b8e0da6c27d6..ac89a93b7c83 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3246,7 +3246,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-unsigned int datagram_poll(struct file *file, struct socket *sock,
+__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 3c6d393c7f29..ec85b7a1f8d1 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -12,11 +12,9 @@ struct device;
extern int register_sound_special(const struct file_operations *fops, int unit);
extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
extern int register_sound_mixer(const struct file_operations *fops, int dev);
-extern int register_sound_midi(const struct file_operations *fops, int dev);
extern int register_sound_dsp(const struct file_operations *fops, int dev);
extern void unregister_sound_special(int unit);
extern void unregister_sound_mixer(int unit);
-extern void unregister_sound_midi(int unit);
extern void unregister_sound_dsp(int unit);
#endif /* _LINUX_SOUND_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
return 1;
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index a949f4f9e4d7..4eda108abee0 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
/* Update-side state. */
- raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
+ spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
- raw_spinlock_t __private lock;
+ spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children */
/* having CBs, but only */
/* is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- raw_spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
@@ -107,7 +107,7 @@ struct srcu_struct {
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
}
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..dd39a690c841 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -11,6 +11,7 @@
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
+extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
/*
@@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
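A hedged usage sketch for the new vmemdup_user() (hypothetical ioctl path; example_ioctl_copy is an assumed name): unlike memdup_user(), the copy may come from vmalloc, so it must be released with kvfree().

static long example_ioctl_copy(const void __user *uarg, size_t len)
{
	void *buf;

	buf = vmemdup_user(uarg, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... use buf ... */
	kvfree(buf);
	return 0;
}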
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 71c237e8240e..ed761f751ecb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int rpc_restart_call_prepare(struct rpc_task *);
int rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
-int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
size_t rpc_max_bc_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 221b7a2e5406..5859563e3c1f 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -64,7 +64,7 @@ enum rpcrdma_memreg {
RPCRDMA_MEMWINDOWS,
RPCRDMA_MEMWINDOWS_ASYNC,
RPCRDMA_MTHCAFMR,
- RPCRDMA_FRMR,
+ RPCRDMA_FRWR,
RPCRDMA_ALLPHYSICAL,
RPCRDMA_LAST
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d60b0f5c38d5..cc22a24516d6 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -443,32 +443,8 @@ extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-static inline void lock_system_sleep(void)
-{
- current->flags |= PF_FREEZER_SKIP;
- mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
- /*
- * Don't use freezer_count() because we don't want the call to
- * try_to_freeze() here.
- *
- * Reason:
- * Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the pm_mutex lock,
- * since the freezer always works with pm_mutex held.
- *
- * More importantly, in the case of hibernation,
- * unlock_system_sleep() gets called in snapshot_read() and
- * snapshot_write() when the freezing condition is still in effect.
- * Which means, if we use try_to_freeze() here, it would make them
- * enter the refrigerator, thus causing hibernation to lockup.
- */
- current->flags &= ~PF_FREEZER_SKIP;
- mutex_unlock(&pm_mutex);
-}
+extern void lock_system_sleep(void);
+extern void unlock_system_sleep(void);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 24ed817082ee..5b1f2a00491c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
enum dma_sync_target target);
/* Accessory functions. */
+
+void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, unsigned long attrs);
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
@@ -115,10 +121,10 @@ extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_free(void);
+extern void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_free(void) { }
+static inline void swiotlb_exit(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
+extern const struct dma_map_ops swiotlb_dma_ops;
+
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/torture.h b/include/linux/torture.h
index a45702eb3e7b..66272862070b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -79,7 +79,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v, int *runnable);
+bool torture_init_begin(char *ttype, bool v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
@@ -96,4 +96,10 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+#ifdef CONFIG_PREEMPT
+#define torture_preempt_schedule() preempt_schedule()
+#else
+#define torture_preempt_schedule()
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 5a090f5ab335..bcdd3790e94d 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -24,11 +24,6 @@
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
-/*
- * Chip num is this value or a valid tpm idx
- */
-#define TPM_ANY_NUM 0xFFFF
-
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
@@ -50,46 +45,52 @@ struct tpm_class_ops {
unsigned long *timeout_cap);
int (*request_locality)(struct tpm_chip *chip, int loc);
void (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ void (*clk_enable)(struct tpm_chip *chip, bool value);
};
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
-extern int tpm_is_tpm2(u32 chip_num);
-extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
-extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
-extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
-extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
-extern int tpm_seal_trusted(u32 chip_num,
+extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
+extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
+extern int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
-extern int tpm_unseal_trusted(u32 chip_num,
+extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
#else
-static inline int tpm_is_tpm2(u32 chip_num)
+static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
+static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
return -ENODEV;
}
-static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
+static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx,
+ const u8 *hash)
+{
return -ENODEV;
}
-static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
+static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
return -ENODEV;
}
-static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
+static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
+{
return -ENODEV;
}
-static inline int tpm_seal_trusted(u32 chip_num,
+static inline int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
return -ENODEV;
}
-static inline int tpm_unseal_trusted(u32 chip_num,
+static inline int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
new file mode 100644
index 000000000000..20d9da77fc11
--- /dev/null
+++ b/include/linux/tpm_eventlog.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_TPM_EVENTLOG_H__
+#define __LINUX_TPM_EVENTLOG_H__
+
+#include <crypto/hash_info.h>
+
+#define TCG_EVENT_NAME_LEN_MAX 255
+#define MAX_TEXT_EVENT 1000 /* Max event string length */
+#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 = 'TCPA' */
+#define TPM2_ACTIVE_PCR_BANKS 3
+
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2
+
+#ifdef CONFIG_PPC64
+#define do_endian_conversion(x) be32_to_cpu(x)
+#else
+#define do_endian_conversion(x) x
+#endif
+
+enum bios_platform_class {
+ BIOS_CLIENT = 0x00,
+ BIOS_SERVER = 0x01,
+};
+
+struct tcpa_event {
+ u32 pcr_index;
+ u32 event_type;
+ u8 pcr_value[20]; /* SHA1 */
+ u32 event_size;
+ u8 event_data[0];
+};
+
+enum tcpa_event_types {
+ PREBOOT = 0,
+ POST_CODE,
+ UNUSED,
+ NO_ACTION,
+ SEPARATOR,
+ ACTION,
+ EVENT_TAG,
+ SCRTM_CONTENTS,
+ SCRTM_VERSION,
+ CPU_MICROCODE,
+ PLATFORM_CONFIG_FLAGS,
+ TABLE_OF_DEVICES,
+ COMPACT_HASH,
+ IPL,
+ IPL_PARTITION_DATA,
+ NONHOST_CODE,
+ NONHOST_CONFIG,
+ NONHOST_INFO,
+};
+
+struct tcpa_pc_event {
+ u32 event_id;
+ u32 event_size;
+ u8 event_data[0];
+};
+
+enum tcpa_pc_event_ids {
+ SMBIOS = 1,
+ BIS_CERT,
+ POST_BIOS_ROM,
+ ESCD,
+ CMOS,
+ NVRAM,
+ OPTION_ROM_EXEC,
+ OPTION_ROM_CONFIG,
+ OPTION_ROM_MICROCODE = 10,
+ S_CRTM_VERSION,
+ S_CRTM_CONTENTS,
+ POST_CONTENTS,
+ HOST_TABLE_OF_DEVICES,
+};
+
+/* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */
+
+struct tcg_efi_specid_event_algs {
+ u16 alg_id;
+ u16 digest_size;
+} __packed;
+
+struct tcg_efi_specid_event {
+ u8 signature[16];
+ u32 platform_class;
+ u8 spec_version_minor;
+ u8 spec_version_major;
+ u8 spec_errata;
+ u8 uintnsize;
+ u32 num_algs;
+ struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
+ u8 vendor_info_size;
+ u8 vendor_info[0];
+} __packed;
+
+struct tcg_pcr_event {
+ u32 pcr_idx;
+ u32 event_type;
+ u8 digest[20];
+ u32 event_size;
+ u8 event[0];
+} __packed;
+
+struct tcg_event_field {
+ u32 event_size;
+ u8 event[0];
+} __packed;
+
+struct tpm2_digest {
+ u16 alg_id;
+ u8 digest[SHA512_DIGEST_SIZE];
+} __packed;
+
+struct tcg_pcr_event2 {
+ u32 pcr_idx;
+ u32 event_type;
+ u32 count;
+ struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
+ struct tcg_event_field event;
+} __packed;
+
+#endif
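For illustration (a sketch under the assumption that the caller already has a sanity-checked TCG 1.2 log in memory; tcpa_event_len is an assumed helper name): the size of one log entry is the fixed tcpa_event header plus its variable event data, with event_size byte-swapped on PPC64 via do_endian_conversion().

static inline size_t tcpa_event_len(const struct tcpa_event *event)
{
	return sizeof(*event) + do_endian_conversion(event->event_size);
}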
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a26ffbe09e71..c94f466d57ef 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- if (rcucheck) { \
- if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
- return; \
+ if (rcucheck) \
rcu_irq_enter_irqson(); \
- } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 3bc5144b1c7e..1ef64d4ad887 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -187,7 +187,7 @@ struct tty_ldisc_ops {
long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- unsigned int (*poll)(struct tty_struct *, struct file *,
+ __poll_t (*poll)(struct tty_struct *, struct file *,
struct poll_table_struct *);
int (*hangup)(struct tty_struct *tty);
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
index d58594a32324..78901ecd2f95 100644
--- a/include/linux/w1-gpio.h
+++ b/include/linux/w1-gpio.h
@@ -10,16 +10,15 @@
#ifndef _LINUX_W1_GPIO_H
#define _LINUX_W1_GPIO_H
+struct gpio_desc;
+
/**
* struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- * @pin: GPIO pin to use
- * @is_open_drain: GPIO pin is configured as open drain
*/
struct w1_gpio_platform_data {
- unsigned int pin;
- unsigned int is_open_drain:1;
+ struct gpio_desc *gpiod;
+ struct gpio_desc *pullup_gpiod;
void (*enable_external_pullup)(int enable);
- unsigned int ext_pullup_enable_pin;
unsigned int pullup_duration;
};
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 158715445ffb..55a611486bac 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
/*
* Wakeup macros to be used to report events to the targets.
*/
+#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
+#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
- __wake_up(x, TASK_NORMAL, 1, (void *) (m))
+ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
- __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
- __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define ___wait_cond_timeout(condition) \
({ \
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 857da67bd931..d9c143d17f70 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -185,7 +185,7 @@ void *lirc_get_pdata(struct file *file);
*/
int lirc_dev_fop_open(struct inode *inode, struct file *file);
int lirc_dev_fop_close(struct inode *inode, struct file *file);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
loff_t *ppos);
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 511615d3bf6f..dc2f64e1b08f 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -56,7 +56,7 @@ struct media_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*open) (struct file *);
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 4d8cb0796bc6..b7e42a1b0910 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -117,7 +117,7 @@ struct soc_camera_host_ops {
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
- unsigned int (*poll)(struct file *, poll_table *);
+ __poll_t (*poll)(struct file *, poll_table *);
};
#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index dacfe54057f8..a9ced6bbee55 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -1037,7 +1037,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
* @file: pointer to struct file
* @wait: pointer to struct poll_table_struct
*/
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
/* Helpers for ioctl_ops */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 28a686eb7d09..fa99f6f66712 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -152,7 +152,7 @@ struct v4l2_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index e157d5c9b224..3d07ba3a8262 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -297,7 +297,7 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* indicate that a non-blocking write can be performed, while read will be
* returned in case of the destination queue.
*/
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait);
/**
@@ -601,7 +601,7 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
#endif /* _MEDIA_V4L2_MEM2MEM_H */
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index d760aa73ebbb..0bda0adc744f 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -219,7 +219,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ef9b64398c8c..e55efc62a950 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -871,7 +871,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait);
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 036127c54bbf..c2fa55657440 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -226,8 +226,7 @@ void vb2_queue_release(struct vb2_queue *q);
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file,
- poll_table *wait);
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
/*
* The following functions are not part of the vb2 core API, but are simple
@@ -262,7 +261,7 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
ssize_t vb2_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 480d50a0b8ba..b712be544f8c 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -267,7 +267,7 @@ int cxl_fd_open(struct inode *inode, struct file *file);
int cxl_fd_release(struct inode *inode, struct file *file);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e89cff0c4c23..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 8e1bf9ae4a5e..6692d67e9245 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -307,7 +307,7 @@ void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll..
*/
-static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
+static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
(POLLIN | POLLRDNORM) : 0;
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 070e93a17c59..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,7 +153,7 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
-unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 20c0c1be2ca7..f7ae6b0a21d0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -107,7 +107,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-unsigned int sctp_poll(struct file *file, struct socket *sock,
+__poll_t sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
diff --git a/include/net/sock.h b/include/net/sock.h
index 73b7830b0bb8..63731289186a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1447,10 +1447,8 @@ do { \
} while (0)
#ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
{
- struct sock *sk = (struct sock *)csk;
-
return lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
@@ -1585,7 +1583,7 @@ int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
-unsigned int sock_no_poll(struct file *, struct socket *,
+__poll_t sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 093e967a2960..58278669cc55 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -387,7 +387,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-unsigned int tcp_poll(struct file *file, struct socket *sock,
+__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
diff --git a/include/net/udp.h b/include/net/udp.h
index 6c759c8594e2..850a8e581cce 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -275,7 +275,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 18c564f60e93..d656809f1217 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -94,7 +94,7 @@ struct rdma_dev_addr {
* The dev_addr->net field must be initialized.
*/
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr, u16 *vlan_id);
+ struct rdma_dev_addr *dev_addr);
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -131,10 +131,9 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
int rdma_addr_size(struct sockaddr *addr);
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *smac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
@@ -198,34 +197,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
}
}
-static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
-{
- struct net_device *dev;
- struct in_device *ip4;
-
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- ip4 = in_dev_get(dev);
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
- ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
- (struct in6_addr *)gid);
-
- if (ip4)
- in_dev_put(ip4);
-
- dev_put(dev);
- }
-}
-
+/*
+ * rdma_get/set_sgid/dgid() APIs are applicable to IB and iWarp.
+ * They are not applicable to RoCE.
+ * RoCE GIDs are derived from the IP addresses.
+ */
static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- if (dev_addr->transport == RDMA_TRANSPORT_IB &&
- dev_addr->dev_type != ARPHRD_INFINIBAND)
- iboe_addr_get_sgid(dev_addr, gid);
- else
- memcpy(gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof *gid);
+ memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr),
+ sizeof(*gid));
}
static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 1f7f604db5aa..811cfcfcbe3d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -549,12 +549,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct rdma_ah_attr *ah_attr);
/**
- * ib_init_ah_from_path - Initialize address handle attributes based on an SA
- * path record.
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
*/
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr);
/**
 * ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd84cda5ed7c..5263c86fd103 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,7 @@
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
+#include <rdma/restrack.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -300,11 +301,6 @@ struct ib_tm_caps {
u32 max_sge;
};
-enum ib_cq_creation_flags {
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
- IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
-};
-
struct ib_cq_init_attr {
unsigned int cqe;
int comp_vector;
@@ -983,9 +979,9 @@ struct ib_wc {
u32 invalidate_rkey;
} ex;
u32 src_qp;
+ u32 slid;
int wc_flags;
u16 pkey_index;
- u32 slid;
u8 sl;
u8 dlid_path_bits;
u8 port_num; /* valid only for DR SMPs on switches */
@@ -1082,6 +1078,7 @@ enum ib_qp_type {
IB_QPT_XRC_INI = 9,
IB_QPT_XRC_TGT,
IB_QPT_MAX,
+ IB_QPT_DRIVER = 0xFF,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1529,6 +1526,7 @@ struct ib_pd {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct ib_mr *__internal_mr;
+ struct rdma_restrack_entry res;
};
struct ib_xrcd {
@@ -1538,6 +1536,10 @@ struct ib_xrcd {
struct mutex tgt_qp_mutex;
struct list_head tgt_qp_list;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_ah {
@@ -1569,6 +1571,10 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_srq {
@@ -1745,6 +1751,11 @@ struct ib_qp {
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
u8 port;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mr {
@@ -2351,6 +2362,10 @@ struct ib_device {
#endif
u32 index;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers
+ */
+ struct rdma_restrack_root res;
/**
* The following mandatory functions are used only at device
@@ -2836,8 +2851,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index);
+ struct net_device *ndev, u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -2858,7 +2872,7 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
#define ib_alloc_pd(device, flags) \
- __ib_alloc_pd((device), (flags), __func__)
+ __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
/**
@@ -2905,7 +2919,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
/**
- * ib_init_ah_from_wc - Initializes address handle attributes from a
+ * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
* work completion.
* @device: Device on which the received message arrived.
* @port_num: Port on which the received message arrived.
@@ -2915,9 +2929,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -3135,8 +3149,12 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller);
+#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
+ __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
@@ -3560,8 +3578,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
/**
* ib_alloc_xrcd - Allocates an XRC domain.
* @device: The device on which to allocate the XRC domain.
+ * @caller: Module name for kernel consumers
*/
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
+#define ib_alloc_xrcd(device) \
+ __ib_alloc_xrcd((device), KBUILD_MODNAME)
/**
* ib_dealloc_xrcd - Deallocates an XRC domain.
@@ -3793,8 +3814,7 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
u32 port_num)
{
- if ((rdma_protocol_roce(dev, port_num)) ||
- (rdma_protocol_iwarp(dev, port_num)))
+ if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
else if ((rdma_protocol_ib(dev, port_num)) &&
(rdma_cap_opa_ah(dev, port_num)))
@@ -3850,4 +3870,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their GIDs, as needed, to the relevant RoCE devices.
+ *
+ * @ibdev: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ibdev);
+
#endif /* IB_VERBS_H */
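A minimal consumer-side sketch of the reworked allocators above (the function, sizes and error handling are illustrative, not taken from this patch): ib_alloc_pd() and ib_alloc_cq() are now macros around __ib_alloc_pd()/__ib_alloc_cq(), so the owning module's KBUILD_MODNAME is recorded automatically and ends up in the new restrack bookkeeping.

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical ULP setup path; "my_ulp" is a placeholder name. */
    static int my_ulp_setup(struct ib_device *dev)
    {
            struct ib_pd *pd;
            struct ib_cq *cq;

            pd = ib_alloc_pd(dev, 0);       /* caller recorded as KBUILD_MODNAME */
            if (IS_ERR(pd))
                    return PTR_ERR(pd);

            cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
            if (IS_ERR(cq)) {
                    ib_dealloc_pd(pd);
                    return PTR_ERR(cq);
            }
            /* ... create QPs, post receives, etc. ... */
            return 0;
    }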
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index f68fca296631..2bbb7a67e643 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits)
return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
}
+/* Check for a valid unicast LID for non-SM traffic types */
+static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ be32_to_cpu(IB_MULTICAST_LID_BASE))
+ return false;
+ } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ return false;
+ }
+ return true;
+}
#endif /* OPA_ADDR_H */
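A brief, hypothetical caller showing where the new check is meant to sit (the wrapper itself is invented; rdma_create_ah() is the normal AH constructor):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/opa_addr.h>

    /* Reject multicast/permissive DLIDs before creating an address handle. */
    static struct ib_ah *my_create_unicast_ah(struct ib_pd *pd,
                                              struct rdma_ah_attr *attr)
    {
            if (!rdma_is_valid_unicast_lid(attr))
                    return ERR_PTR(-EINVAL);
            return rdma_create_ah(pd, attr);
    }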
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 3d2eed3c4e75..6538a5cc27b6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -413,4 +413,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len);
+/**
+ * rdma_read_gids - Return the SGID and DGID used for establishing
+ * the connection. This can be used after rdma_resolve_addr() on the
+ * client side, or on a new connection on the server side. It is
+ * applicable to IB, RoCE and iWarp. If the cm_id is not yet bound to
+ * an RDMA device, no SGID or DGID is copied to the given pointers.
+ * @id: Communication identifier whose GIDs are queried.
+ * @sgid: Pointer to SGID where SGID will be returned. It is optional.
+ * @dgid: Pointer to DGID where DGID will be returned. It is optional.
+ * Note: This API should not be used by any new ULPs or new code.
+ * Instead, users interested in the GIDs should query the path record
+ * of the rdma_cm_id.
+ * This API is provided for compatibility with existing users.
+ */
+
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid);
+
#endif /* RDMA_CM_H */
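A sketch of the compatibility usage described above (the helper and logging are placeholders; new code should consult the path record instead):

    #include <rdma/rdma_cm.h>

    static void my_log_gids(struct rdma_cm_id *cm_id)
    {
            union ib_gid sgid, dgid;

            /* Client: after rdma_resolve_addr(); server: on a newly accepted id. */
            rdma_read_gids(cm_id, &sgid, &dgid);
            pr_debug("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
    }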
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6947a6ba2557..6a69d71a21a5 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -36,17 +36,17 @@
#include <rdma/rdma_cm.h>
/**
- * rdma_set_ib_paths - Manually sets the path records used to establish a
+ * rdma_set_ib_path - Manually sets the path record used to establish a
* connection.
* @id: Connection identifier associated with the request.
* @path_rec: Reference to the path record
*
* This call permits a user to specify routing information for rdma_cm_id's
- * bound to Infiniband devices. It is called on the client side of a
+ * bound to InfiniBand devices. It is called on the client side of a
* connection and replaces the call to rdma_resolve_route.
*/
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths);
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec);
/* Global qkey for UDP QPs and multicast groups. */
#define RDMA_UDP_QKEY 0x01234567
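Existing callers migrate by dropping the path count and passing a single record; roughly (the wrapper is hypothetical):

    #include <rdma/rdma_cm_ib.h>

    static int my_set_route(struct rdma_cm_id *cm_id, struct sa_path_rec *rec)
    {
            /* was: rdma_set_ib_paths(cm_id, rec, 1); */
            return rdma_set_ib_path(cm_id, rec);
    }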
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 1ba84a78f1c5..4118324a0310 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -228,13 +228,6 @@ struct rvt_driver_provided {
int (*port_callback)(struct ib_device *, u8, struct kobject *);
/*
- * Returns a string to represent the device for which is being
- * registered. This is primarily used for error and debug messages on
- * the console.
- */
- const char * (*get_card_name)(struct rvt_dev_info *rdi);
-
- /*
 * Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
@@ -419,6 +412,30 @@ struct rvt_dev_info {
};
+/**
+ * rvt_set_ibdev_name - Craft an IB device name from client info
+ * @rdi: pointer to the client rvt_dev_info structure
+ * @fmt: format string used to build the name
+ * @name: client specific name
+ * @unit: client specific unit number
+ */
+static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
+ const char *fmt, const char *name,
+ const int unit)
+{
+ snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+}
+
+/**
+ * rvt_get_ibdev_name - return the IB name
+ * @rdi: rdmavt device
+ *
+ * Return the registered name of the device.
+ */
+static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
+{
+ return rdi->ibdev.name;
+}
+
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct rvt_pd, ibpd);
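With the get_card_name() callback removed, a driver hands its naming scheme to rdmavt once at registration; a sketch with a made-up driver prefix:

    #include <rdma/rdma_vt.h>

    /* Hypothetical registration snippet; "foo" and unit are placeholders. */
    static void my_name_device(struct rvt_dev_info *rdi, int unit)
    {
            rvt_set_ibdev_name(rdi, "%s_%d", "foo", unit);  /* e.g. "foo_0" */
            pr_info("registered %s\n", rvt_get_ibdev_name(rdi));
    }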
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
new file mode 100644
index 000000000000..c2d81167c858
--- /dev/null
+++ b/include/rdma/restrack.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_RESTRACK_H_
+#define _RDMA_RESTRACK_H_
+
+#include <linux/typecheck.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+
+/**
+ * enum rdma_restrack_type - HW objects to track
+ */
+enum rdma_restrack_type {
+ /**
+ * @RDMA_RESTRACK_PD: Protection domain (PD)
+ */
+ RDMA_RESTRACK_PD,
+ /**
+ * @RDMA_RESTRACK_CQ: Completion queue (CQ)
+ */
+ RDMA_RESTRACK_CQ,
+ /**
+ * @RDMA_RESTRACK_QP: Queue pair (QP)
+ */
+ RDMA_RESTRACK_QP,
+ /**
+ * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
+ */
+ RDMA_RESTRACK_XRCD,
+ /**
+ * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
+ */
+ RDMA_RESTRACK_MAX
+};
+
+#define RDMA_RESTRACK_HASH_BITS 8
+/**
+ * struct rdma_restrack_root - main resource tracking management
+ * entity, per-device
+ */
+struct rdma_restrack_root {
+ /**
+ * @rwsem: Read/write lock to protect lists
+ */
+ struct rw_semaphore rwsem;
+ /**
+ * @hash: global database for all resources per-device
+ */
+ DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+};
+
+/**
+ * struct rdma_restrack_entry - metadata per-entry
+ */
+struct rdma_restrack_entry {
+ /**
+ * @valid: validity indicator
+ *
+ * The entries are filled during rdma_restrack_add() and an
+ * attempt to free them is made during rdma_restrack_del().
+ *
+ * For an example of the latter, see mlx5 QPs of type MLX5_IB_QPT_HW_GSI.
+ */
+ bool valid;
+ /**
+ * @kref: Protect destroy of the resource
+ */
+ struct kref kref;
+ /**
+ * @comp: Signals that all consumers of the resource have completed their work
+ */
+ struct completion comp;
+ /**
+ * @task: owner of resource tracking entity
+ *
+ * There are two types of entities: created by user and created
+ * by kernel.
+ *
+ * This is relevant for the entities created by users.
+ * For the entities created by kernel, this pointer will be NULL.
+ */
+ struct task_struct *task;
+ /**
+ * @kern_name: name of owner for the kernel created entities.
+ */
+ const char *kern_name;
+ /**
+ * @node: hash table entry
+ */
+ struct hlist_node node;
+ /**
+ * @type: various objects in restrack database
+ */
+ enum rdma_restrack_type type;
+};
+
+/**
+ * rdma_restrack_init() - initialize resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_init(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_clean(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_count() - return the current usage count for a given object type
+ * @res: resource tracking root
+ * @type: type of object to count
+ * @ns: PID namespace
+ */
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns);
+
+/**
+ * rdma_restrack_add() - add object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_del() - delete object from the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_is_kernel_res() - check whether the resource is owned by the kernel
+ * @res: resource entry
+ */
+static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+{
+ return !res->task;
+}
+
+/**
+ * rdma_restrack_get() - take a reference to protect the resource from release
+ * @res: resource entry
+ */
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_put() - release resource
+ * @res: resource entry
+ */
+int rdma_restrack_put(struct rdma_restrack_entry *res);
+#endif /* _RDMA_RESTRACK_H_ */
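A hedged sketch of the intended consumer pattern for the API above (the dump helper is hypothetical, and the return convention of rdma_restrack_get() is assumed to be non-zero on success):

    #include <linux/printk.h>
    #include <linux/sched.h>
    #include <rdma/restrack.h>

    /* Pin a tracked entry while inspecting it, then drop the reference. */
    static void my_dump_res(struct rdma_restrack_entry *res)
    {
            if (!rdma_restrack_get(res))    /* assumed: 0 means the object is going away */
                    return;

            if (rdma_is_kernel_res(res))
                    pr_info("kernel owner: %s\n", res->kern_name);
            else
                    pr_info("owner pid: %d\n", task_pid_vnr(res->task));

            rdma_restrack_put(res);         /* final put lets the destroyer complete res->comp */
    }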
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6df6fe0c2198..225ab7783dfd 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -75,16 +75,15 @@ enum phy_event {
PHYE_OOB_ERROR,
PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
PHYE_RESUME_TIMEOUT,
+ PHYE_SHUTDOWN,
PHY_NUM_EVENTS,
};
enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
DISCE_REVALIDATE_DOMAIN,
- DISCE_PROBE,
DISCE_SUSPEND,
DISCE_RESUME,
- DISCE_DESTRUCT,
DISC_NUM_EVENTS,
};
@@ -261,6 +260,7 @@ struct asd_sas_port {
struct list_head dev_list;
struct list_head disco_list;
struct list_head destroy_list;
+ struct list_head sas_port_del_list;
enum sas_linkrate linkrate;
struct sas_work work;
@@ -292,6 +292,7 @@ struct asd_sas_port {
struct asd_sas_event {
struct sas_work work;
struct asd_sas_phy *phy;
+ int event;
};
static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
@@ -301,17 +302,24 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
return ev;
}
+static inline void INIT_SAS_EVENT(struct asd_sas_event *ev,
+ void (*fn)(struct work_struct *),
+ struct asd_sas_phy *phy, int event)
+{
+ INIT_SAS_WORK(&ev->work, fn);
+ ev->phy = phy;
+ ev->event = event;
+}
+
+#define SAS_PHY_SHUTDOWN_THRES 1024
+
/* The phy pretty much is controlled by the LLDD.
* The class only reads those fields.
*/
struct asd_sas_phy {
/* private: */
- struct asd_sas_event port_events[PORT_NUM_EVENTS];
- struct asd_sas_event phy_events[PHY_NUM_EVENTS];
-
- unsigned long port_events_pending;
- unsigned long phy_events_pending;
-
+ atomic_t event_nr;
+ int in_shutdown;
int error;
int suspended;
@@ -380,6 +388,9 @@ struct sas_ha_struct {
struct device *dev; /* should be set */
struct module *lldd_module; /* should be set */
+ struct workqueue_struct *event_q;
+ struct workqueue_struct *disco_q;
+
u8 *sas_addr; /* must be set */
u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
@@ -399,6 +410,8 @@ struct sas_ha_struct {
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */
+
+ int event_thres;
};
#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -670,6 +683,7 @@ extern int sas_bios_param(struct scsi_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
+extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
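With the static per-phy event arrays gone, events are allocated per occurrence, counted in phy->event_nr against SAS_PHY_SHUTDOWN_THRES, and queued on the new ha->event_q. A rough sketch of what a notifier might look like (the allocation scheme, the worker function and the sas_work member layout are assumptions, not taken from the patch):

    #include <linux/slab.h>
    #include <scsi/libsas.h>

    static void my_phy_event_worker(struct work_struct *work);  /* placeholder handler */

    static void my_notify_phy_event(struct asd_sas_phy *phy, enum phy_event ev)
    {
            struct asd_sas_event *event;

            event = kzalloc(sizeof(*event), GFP_ATOMIC);    /* placeholder allocation */
            if (!event)
                    return;

            atomic_inc(&phy->event_nr);
            INIT_SAS_EVENT(event, my_phy_event_worker, phy, ev);
            /* assumes struct sas_work embeds a work_struct named "work" */
            queue_work(phy->ha->event_q, &event->work.work);
    }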
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 7fb57e905526..949a016dd7fa 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -171,7 +171,6 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
extern void scsi_kunmap_atomic_sg(void *virt);
extern int scsi_init_io(struct scsi_cmnd *cmd);
-extern void scsi_initialize_rq(struct request *rq);
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..1a1df0d21ee3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -571,6 +571,8 @@ struct Scsi_Host {
struct blk_mq_tag_set tag_set;
};
+ struct rcu_head rcu;
+
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1df8efb0ee01..c36860111932 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -236,6 +236,7 @@ struct scsi_varlen_cdb_hdr {
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
+#define VENDOR_SPECIFIC 0x09
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8cf30215c177..15da45dc2a5d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,8 +139,8 @@ enum fc_vport_state {
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
#define FC_PORTSPEED_25GBIT 0x800
-#define FC_PORTSPEED_64BIT 0x1000
-#define FC_PORTSPEED_128BIT 0x2000
+#define FC_PORTSPEED_64GBIT 0x1000
+#define FC_PORTSPEED_128GBIT 0x2000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 62895b405933..05ec927a3c72 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -156,6 +156,7 @@ struct sas_port {
struct mutex phy_list_mutex;
struct list_head phy_list;
+ struct list_head del_list; /* libsas only */
};
#define dev_to_sas_port(d) \
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 5be834de491a..c16a3c9a4d9b 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -129,6 +129,23 @@ struct srp_login_req {
u8 target_port_id[16];
};
+/**
+ * struct srp_login_req_rdma - RDMA/CM login parameters.
+ *
+ * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
+ * data. The %srp_login_req_rdma structure contains the same information as
+ * %srp_login_req but with the reserved data removed.
+ */
+struct srp_login_req_rdma {
+ u64 tag;
+ __be16 req_buf_fmt;
+ u8 req_flags;
+ u8 opcode;
+ __be32 req_it_iu_len;
+ u8 initiator_port_id[16];
+ u8 target_port_id[16];
+};
+
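A compile-time expression of the 56-byte limit mentioned in the comment above (where such a check lives is up to the consumer; this is only an illustration):

    #include <linux/bug.h>

    static inline void srp_login_req_rdma_size_check(void)
    {
            /* RDMA/CM private data for an SRP login is limited to 56 bytes. */
            BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) > 56);
    }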
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ca00130cb028..9c14e21dda85 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -193,7 +193,7 @@ struct hda_dai_map {
* @pvt_data - private data, for asoc contains asoc codec object
*/
struct hdac_ext_device {
- struct hdac_device hdac;
+ struct hdac_device hdev;
struct hdac_ext_bus *ebus;
/* soc-dai to nid map */
@@ -213,7 +213,7 @@ struct hdac_ext_dma_params {
u8 stream_tag;
};
#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdac))
+ struct hdac_ext_device, hdev))
/*
* HD-audio codec base driver
*/
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ab9fcb2f97f0..afeca593188a 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -37,7 +37,7 @@ struct snd_hwdep_ops {
long count, loff_t *offset);
int (*open)(struct snd_hwdep *hw, struct file * file);
int (*release)(struct snd_hwdep *hw, struct file * file);
- unsigned int (*poll)(struct snd_hwdep *hw, struct file *file,
+ __poll_t (*poll)(struct snd_hwdep *hw, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg);
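Drivers implementing these ops now return the annotated __poll_t; a minimal sketch of a conforming callback (the driver state and field names are made up):

    #include <linux/poll.h>
    #include <sound/hwdep.h>

    struct my_hwdep_state {                 /* placeholder driver state */
            wait_queue_head_t wait;
            bool data_ready;
    };

    static __poll_t my_hwdep_poll(struct snd_hwdep *hw, struct file *file,
                                  poll_table *wait)
    {
            struct my_hwdep_state *st = hw->private_data;
            __poll_t mask = 0;

            poll_wait(file, &st->wait, wait);
            if (st->data_ready)
                    mask |= POLLIN | POLLRDNORM;    /* now __poll_t, see the poll.h hunk below */
            return mask;
    }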
diff --git a/include/sound/info.h b/include/sound/info.h
index 67390ee846aa..becdf66d2825 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -62,7 +62,7 @@ struct snd_info_entry_ops {
loff_t (*llseek)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
loff_t offset, int orig);
- unsigned int (*poll)(struct snd_info_entry *entry,
+ __poll_t (*poll)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 24febf9e177c..e054c583d3b3 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -169,6 +169,10 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM)
#define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG)
#define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM)
+#define SNDRV_PCM_FMTBIT_S20_LE _SNDRV_PCM_FMTBIT(S20_LE)
+#define SNDRV_PCM_FMTBIT_U20_LE _SNDRV_PCM_FMTBIT(U20_LE)
+#define SNDRV_PCM_FMTBIT_S20_BE _SNDRV_PCM_FMTBIT(S20_BE)
+#define SNDRV_PCM_FMTBIT_U20_BE _SNDRV_PCM_FMTBIT(U20_BE)
#define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL)
#define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE)
#define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE)
@@ -202,6 +206,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_LE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_LE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_LE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_BE
@@ -213,6 +219,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_BE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_BE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_BE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
struct snd_pcm_file {
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
index ef18494769ee..64d027dbaaca 100644
--- a/include/sound/rt5514.h
+++ b/include/sound/rt5514.h
@@ -14,6 +14,8 @@
struct rt5514_platform_data {
unsigned int dmic_init_delay;
+ const char *dsp_calib_clk_name;
+ unsigned int dsp_calib_clk_rate;
};
#endif
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index d0c33a9972b9..f218c742f08e 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -25,6 +25,9 @@ struct rt5645_platform_data {
bool level_trigger_irq;
/* Invert JD1_1 status polarity */
bool inv_jd1_1;
+
+ /* Value to assign to snd_soc_card.long_name */
+ const char *long_name;
};
#endif
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 1a9191cd4bb3..9da6388c20a1 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -16,6 +16,7 @@
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
#define __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
+#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index a7d8d335b043..082224275f52 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -17,6 +17,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -26,17 +27,13 @@ struct snd_soc_acpi_package_context {
bool data_valid;
};
+/* The codec name used in DAIs is i2c-<HID>:00, with HID being 8 chars */
+#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
+
#if IS_ENABLED(CONFIG_ACPI)
-/* translation fron HID to I2C name, needed for DAI codec_name */
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx);
#else
-static inline const char *
-snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- return NULL;
-}
static inline bool
snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx)
@@ -49,9 +46,6 @@ snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
-/* acpi check hid */
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
-
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 58acd00cae19..8ad11669e4d8 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -102,6 +102,8 @@ struct snd_compr_stream;
SNDRV_PCM_FMTBIT_S16_BE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S20_LE |\
+ SNDRV_PCM_FMTBIT_S20_BE |\
SNDRV_PCM_FMTBIT_S24_3LE |\
SNDRV_PCM_FMTBIT_S24_3BE |\
SNDRV_PCM_FMTBIT_S32_LE |\
@@ -294,9 +296,6 @@ struct snd_soc_dai {
/* DAI runtime info */
unsigned int capture_active:1; /* stream is in use */
unsigned int playback_active:1; /* stream is in use */
- unsigned int symmetric_rates:1;
- unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
unsigned int probed:1;
unsigned int active;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1a7323238c49..b655d987fbe7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -494,6 +494,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
#endif
+void snd_soc_disconnect_sync(struct device *dev);
+
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream);
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -802,6 +804,9 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ unsigned int (*read)(struct snd_soc_component *, unsigned int);
+ int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
+
/* pcm creation and destruction */
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
@@ -858,12 +863,10 @@ struct snd_soc_component {
struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
- struct snd_soc_dai_driver *dai_drv;
- int num_dai;
-
const struct snd_soc_component_driver *driver;
struct list_head dai_list;
+ int num_dai;
int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4342a329821f..c3ac5ec86519 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -193,7 +193,6 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_flags(flag, "|", \
{ (1 << EXTENT_FLAG_PINNED), "PINNED" },\
{ (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
{ (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
{ (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
{ (1 << EXTENT_FLAG_FILLING), "FILLING" },\
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8f8dd42fa57b..06c87f9f720c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_NO_SPC_ROLL, "no space roll forward" }, \
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
- { CP_SPEC_LOG_NUM, "log type is 2" })
+ { CP_SPEC_LOG_NUM, "log type is 2" }, \
+ { CP_RECOVER_DIR, "dir needs recovery" })
struct victim_sel_policy;
struct f2fs_map_blocks;
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59d40c454aa0..0b50fda80db0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -243,6 +243,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
__entry->grphi, __entry->gpevent)
);
+#ifdef CONFIG_RCU_NOCB_CPU
/*
* Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
* to assist debugging of these handoffs.
@@ -285,6 +286,7 @@ TRACE_EVENT(rcu_nocb_wake,
TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
);
+#endif
/*
* Tracepoint for tasks blocking within preemptible-RCU read-side
@@ -421,76 +423,40 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "End" for
- * leaving it, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle. "Error on entry: not idle task" and "Error on
- * exit: not idle task" indicate that a non-idle task is erroneously
- * toying with the idle loop.
+ * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
+ * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
+ * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.
*
* These events also take a pair of numbers, which indicate the nesting
- * depth before and after the event of interest. Note that task-related
- * events use the upper bits of each number, while interrupt-related
- * events use the lower bits.
+ * depth before and after the event of interest, and a third number that is
+ * the ->dynticks counter. Note that task-related and interrupt-related
+ * events use two separate counters, and that the "++=" and "--=" events
+ * for irq/NMI will change the counter by two, otherwise by one.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
- TP_ARGS(polarity, oldnesting, newnesting),
+ TP_ARGS(polarity, oldnesting, newnesting, dynticks),
TP_STRUCT__entry(
__field(const char *, polarity)
- __field(long long, oldnesting)
- __field(long long, newnesting)
+ __field(long, oldnesting)
+ __field(long, newnesting)
+ __field(int, dynticks)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
+ __entry->dynticks = atomic_read(&dynticks);
),
- TP_printk("%s %llx %llx", __entry->polarity,
- __entry->oldnesting, __entry->newnesting)
-);
-
-/*
- * Tracepoint for RCU preparation for idle, the goal being to get RCU
- * processing done so that the current CPU can shut off its scheduling
- * clock and enter dyntick-idle mode. One way to accomplish this is
- * to drain all RCU callbacks from this CPU, and the other is to have
- * done everything RCU requires for the current grace period. In this
- * latter case, the CPU will be awakened at the end of the current grace
- * period in order to process the remainder of its callbacks.
- *
- * These tracepoints take a string as argument:
- *
- * "No callbacks": Nothing to do, no callbacks on this CPU.
- * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
- * "Begin holdoff": Attempt failed, don't retry until next jiffy.
- * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
- * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
- * "More callbacks": Still more callbacks, try again to clear them out.
- * "Callbacks drained": All callbacks processed, off to dyntick idle!
- * "Timer": Timer fired to cause CPU to continue processing callbacks.
- * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
- * "Cleanup after idle": Idle exited, timer canceled.
- */
-TRACE_EVENT(rcu_prep_idle,
-
- TP_PROTO(const char *reason),
-
- TP_ARGS(reason),
-
- TP_STRUCT__entry(
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __entry->reason = reason;
- ),
-
- TP_printk("%s", __entry->reason)
+ TP_printk("%s %lx %lx %#3x", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting,
+ __entry->dynticks & 0xfff)
);
/*
@@ -799,8 +765,7 @@ TRACE_EVENT(rcu_barrier,
grplo, grphi, gp_tasks) do { } \
while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
-#define trace_rcu_prep_idle(reason) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
do { } while (0)
diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
new file mode 100644
index 000000000000..aa19afc73a4e
--- /dev/null
+++ b/include/trace/events/rdma.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+
+/*
+ * enum ib_event_type, from include/rdma/ib_verbs.h
+ */
+
+#define IB_EVENT_LIST \
+ ib_event(CQ_ERR) \
+ ib_event(QP_FATAL) \
+ ib_event(QP_REQ_ERR) \
+ ib_event(QP_ACCESS_ERR) \
+ ib_event(COMM_EST) \
+ ib_event(SQ_DRAINED) \
+ ib_event(PATH_MIG) \
+ ib_event(PATH_MIG_ERR) \
+ ib_event(DEVICE_FATAL) \
+ ib_event(PORT_ACTIVE) \
+ ib_event(PORT_ERR) \
+ ib_event(LID_CHANGE) \
+ ib_event(PKEY_CHANGE) \
+ ib_event(SM_CHANGE) \
+ ib_event(SRQ_ERR) \
+ ib_event(SRQ_LIMIT_REACHED) \
+ ib_event(QP_LAST_WQE_REACHED) \
+ ib_event(CLIENT_REREGISTER) \
+ ib_event(GID_CHANGE) \
+ ib_event_end(WQ_FATAL)
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+#define ib_event_end(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+
+IB_EVENT_LIST
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) { IB_EVENT_##x, #x },
+#define ib_event_end(x) { IB_EVENT_##x, #x }
+
+#define rdma_show_ib_event(x) \
+ __print_symbolic(x, IB_EVENT_LIST)
+
+/*
+ * enum ib_wc_status type, from include/rdma/ib_verbs.h
+ */
+#define IB_WC_STATUS_LIST \
+ ib_wc_status(SUCCESS) \
+ ib_wc_status(LOC_LEN_ERR) \
+ ib_wc_status(LOC_QP_OP_ERR) \
+ ib_wc_status(LOC_EEC_OP_ERR) \
+ ib_wc_status(LOC_PROT_ERR) \
+ ib_wc_status(WR_FLUSH_ERR) \
+ ib_wc_status(MW_BIND_ERR) \
+ ib_wc_status(BAD_RESP_ERR) \
+ ib_wc_status(LOC_ACCESS_ERR) \
+ ib_wc_status(REM_INV_REQ_ERR) \
+ ib_wc_status(REM_ACCESS_ERR) \
+ ib_wc_status(REM_OP_ERR) \
+ ib_wc_status(RETRY_EXC_ERR) \
+ ib_wc_status(RNR_RETRY_EXC_ERR) \
+ ib_wc_status(LOC_RDD_VIOL_ERR) \
+ ib_wc_status(REM_INV_RD_REQ_ERR) \
+ ib_wc_status(REM_ABORT_ERR) \
+ ib_wc_status(INV_EECN_ERR) \
+ ib_wc_status(INV_EEC_STATE_ERR) \
+ ib_wc_status(FATAL_ERR) \
+ ib_wc_status(RESP_TIMEOUT_ERR) \
+ ib_wc_status_end(GENERAL_ERR)
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+#define ib_wc_status_end(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+
+IB_WC_STATUS_LIST
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) { IB_WC_##x, #x },
+#define ib_wc_status_end(x) { IB_WC_##x, #x }
+
+#define rdma_show_wc_status(x) \
+ __print_symbolic(x, IB_WC_STATUS_LIST)
+
+/*
+ * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
+ */
+#define RDMA_CM_EVENT_LIST \
+ rdma_cm_event(ADDR_RESOLVED) \
+ rdma_cm_event(ADDR_ERROR) \
+ rdma_cm_event(ROUTE_RESOLVED) \
+ rdma_cm_event(ROUTE_ERROR) \
+ rdma_cm_event(CONNECT_REQUEST) \
+ rdma_cm_event(CONNECT_RESPONSE) \
+ rdma_cm_event(CONNECT_ERROR) \
+ rdma_cm_event(UNREACHABLE) \
+ rdma_cm_event(REJECTED) \
+ rdma_cm_event(ESTABLISHED) \
+ rdma_cm_event(DISCONNECTED) \
+ rdma_cm_event(DEVICE_REMOVAL) \
+ rdma_cm_event(MULTICAST_JOIN) \
+ rdma_cm_event(MULTICAST_ERROR) \
+ rdma_cm_event(ADDR_CHANGE) \
+ rdma_cm_event_end(TIMEWAIT_EXIT)
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+#define rdma_cm_event_end(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+
+RDMA_CM_EVENT_LIST
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) { RDMA_CM_EVENT_##x, #x },
+#define rdma_cm_event_end(x) { RDMA_CM_EVENT_##x, #x }
+
+#define rdma_show_cm_event(x) \
+ __print_symbolic(x, RDMA_CM_EVENT_LIST)
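These lists are X-macros: expanded once through TRACE_DEFINE_ENUM() so tooling can resolve the enum values, and once into the value/name pairs that __print_symbolic() consumes. For illustration, after the second expansion rdma_show_cm_event(ev) is roughly:

    __print_symbolic(ev,
            { RDMA_CM_EVENT_ADDR_RESOLVED, "ADDR_RESOLVED" },
            { RDMA_CM_EVENT_ADDR_ERROR, "ADDR_ERROR" },
            /* ... */
            { RDMA_CM_EVENT_TIMEWAIT_EXIT, "TIMEWAIT_EXIT" })

The real consumers are the xprtrdma tracepoints added in rpcrdma.h below.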
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
new file mode 100644
index 000000000000..50ed3f8bf534
--- /dev/null
+++ b/include/trace/events/rpcrdma.h
@@ -0,0 +1,890 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpcrdma
+
+#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPCRDMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/rdma.h>
+
+/**
+ ** Event classes
+ **/
+
+DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(const void *, r_xprt)
+ __field(u32, xid)
+ __field(u32, version)
+ __field(u32, proc)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->r_xprt = rep->rr_rxprt;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->version = be32_to_cpu(rep->rr_vers);
+ __entry->proc = be32_to_cpu(rep->rr_proc);
+ ),
+
+ TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
+ __entry->r_xprt, __entry->xid, __entry->rep,
+ __entry->version, __entry->proc
+ )
+);
+
+#define DEFINE_REPLY_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_reply_event, name, \
+ TP_PROTO( \
+ const struct rpcrdma_rep *rep \
+ ), \
+ TP_ARGS(rep))
+
+DECLARE_EVENT_CLASS(xprtrdma_rxprt,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt
+ ),
+
+ TP_ARGS(r_xprt),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p",
+ __get_str(addr), __get_str(port), __entry->r_xprt
+ )
+);
+
+#define DEFINE_RXPRT_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rxprt, name, \
+ TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt \
+ ), \
+ TP_ARGS(r_xprt))
+
+DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ unsigned int pos,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, pos, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(unsigned int, pos)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->pos = pos;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->pos, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_RDCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rdch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ unsigned int pos, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, pos, mr, nsegs))
+
+DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_WRCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_wrch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, mr, nsegs))
+
+TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
+TRACE_DEFINE_ENUM(FRWR_IS_VALID);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
+
+#define xprtrdma_show_frwr_state(x) \
+ __print_symbolic(x, \
+ { FRWR_IS_INVALID, "INVALID" }, \
+ { FRWR_IS_VALID, "VALID" }, \
+ { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
+ { FRWR_FLUSHED_LI, "FLUSHED_LI" })
+
+DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpcrdma_frwr *frwr
+ ),
+
+ TP_ARGS(wc, frwr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(unsigned int, state)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ __entry->state = frwr->fr_state;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk(
+ "mr=%p state=%s: %s (%u/0x%x)",
+ __entry->mr, xprtrdma_show_frwr_state(__entry->state),
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_FRWR_DONE_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_frwr_done, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpcrdma_frwr *frwr \
+ ), \
+ TP_ARGS(wc, frwr))
+
+DECLARE_EVENT_CLASS(xprtrdma_mr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr, name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
+ TP_ARGS(mr))
+
+DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(const void *, rqst)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqst = rqst;
+ __entry->req = rpcr_to_rdmar(rqst);
+ __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
+ __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ )
+);
+
+#define DEFINE_CB_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_cb_event, name, \
+ TP_PROTO( \
+ const struct rpc_rqst *rqst \
+ ), \
+ TP_ARGS(rqst))
+
+/**
+ ** Connection events
+ **/
+
+TRACE_EVENT(xprtrdma_conn_upcall,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ struct rdma_cm_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __field(int, status)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __entry->status = event->status;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, rdma_show_cm_event(__entry->event),
+ __entry->event, __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_disconnect,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ int status
+ ),
+
+ TP_ARGS(r_xprt, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(int, status)
+ __field(int, connected)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->status = status;
+ __entry->connected = r_xprt->rx_ep.rep_connected;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->status,
+ __entry->connected == 1 ? "still " : "dis"
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
+DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
+DEFINE_RXPRT_EVENT(xprtrdma_create);
+DEFINE_RXPRT_EVENT(xprtrdma_destroy);
+DEFINE_RXPRT_EVENT(xprtrdma_remove);
+DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
+DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
+DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
+
+TRACE_EVENT(xprtrdma_qp_error,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ const struct ib_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __string(name, event->device->name)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __assign_str(name, event->device->name);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __get_str(name), rdma_show_ib_event(__entry->event),
+ __entry->event
+ )
+);
+
+/**
+ ** Call events
+ **/
+
+TRACE_EVENT(xprtrdma_createmrs,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int count
+ ),
+
+ TP_ARGS(r_xprt, count),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->count = count;
+ ),
+
+ TP_printk("r_xprt=%p: created %u MRs",
+ __entry->r_xprt, __entry->count
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
+
+DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
+
+TRACE_DEFINE_ENUM(rpcrdma_noch);
+TRACE_DEFINE_ENUM(rpcrdma_readch);
+TRACE_DEFINE_ENUM(rpcrdma_areadch);
+TRACE_DEFINE_ENUM(rpcrdma_writech);
+TRACE_DEFINE_ENUM(rpcrdma_replych);
+
+#define xprtrdma_show_chunktype(x) \
+ __print_symbolic(x, \
+ { rpcrdma_noch, "inline" }, \
+ { rpcrdma_readch, "read list" }, \
+ { rpcrdma_areadch, "*read list" }, \
+ { rpcrdma_writech, "write list" }, \
+ { rpcrdma_replych, "reply chunk" })
+
+TRACE_EVENT(xprtrdma_marshal,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ unsigned int hdrlen,
+ unsigned int rtype,
+ unsigned int wtype
+ ),
+
+ TP_ARGS(rqst, hdrlen, rtype, wtype),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(unsigned int, hdrlen)
+ __field(unsigned int, headlen)
+ __field(unsigned int, pagelen)
+ __field(unsigned int, taillen)
+ __field(unsigned int, rtype)
+ __field(unsigned int, wtype)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->hdrlen = hdrlen;
+ __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
+ __entry->pagelen = rqst->rq_snd_buf.page_len;
+ __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
+ __entry->rtype = rtype;
+ __entry->wtype = wtype;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->hdrlen,
+ __entry->headlen, __entry->pagelen, __entry->taillen,
+ xprtrdma_show_chunktype(__entry->rtype),
+ xprtrdma_show_chunktype(__entry->wtype)
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_send,
+ TP_PROTO(
+ const struct rpcrdma_req *req,
+ int status
+ ),
+
+ TP_ARGS(req, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(int, num_sge)
+ __field(bool, signaled)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
+ __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
+ IB_SEND_SIGNALED;
+ __entry->status = status;
+ ),
+
+ TP_printk("req=%p, %d SGEs%s, status=%d",
+ __entry->req, __entry->num_sge,
+ (__entry->signaled ? ", signaled" : ""),
+ __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_recv,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ int status
+ ),
+
+ TP_ARGS(rep, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->status = status;
+ ),
+
+ TP_printk("rep=%p status=%d",
+ __entry->rep, __entry->status
+ )
+);
+
+/**
+ ** Completion events
+ **/
+
+TRACE_EVENT(xprtrdma_wc_send,
+ TP_PROTO(
+ const struct rpcrdma_sendctx *sc,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(sc, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, unmap_count)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->req = sc->sc_req;
+ __entry->unmap_count = sc->sc_unmap_count;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
+ __entry->req, __entry->unmap_count,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+TRACE_EVENT(xprtrdma_wc_receive,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(rep, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(unsigned int, byte_len)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->byte_len = wc->byte_len;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
+ __entry->rep, __entry->byte_len,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+
+DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_unmap);
+DEFINE_MR_EVENT(xprtrdma_remoteinv);
+DEFINE_MR_EVENT(xprtrdma_recover_mr);
+
+/**
+ ** Reply events
+ **/
+
+TRACE_EVENT(xprtrdma_reply,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_rep *rep,
+ const struct rpcrdma_req *req,
+ unsigned int credits
+ ),
+
+ TP_ARGS(task, rep, req, credits),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ __field(unsigned int, credits)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->req = req;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->credits = credits;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->credits, __entry->rep, __entry->req
+ )
+);
+
+TRACE_EVENT(xprtrdma_defer_cmp,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
+ __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->rep
+ )
+);
+
+DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
+DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
+DEFINE_REPLY_EVENT(xprtrdma_reply_short);
+DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+
+TRACE_EVENT(xprtrdma_fixup,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int len,
+ int hdrlen
+ ),
+
+ TP_ARGS(rqst, len, hdrlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, base)
+ __field(int, len)
+ __field(int, hdrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
+ __entry->len = len;
+ __entry->hdrlen = hdrlen;
+ ),
+
+ TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->base, __entry->len, __entry->hdrlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_fixup_pg,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int pageno,
+ const void *pos,
+ int len,
+ int curlen
+ ),
+
+ TP_ARGS(rqst, pageno, pos, len, curlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, pos)
+ __field(int, pageno)
+ __field(int, len)
+ __field(int, curlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->pos = pos;
+ __entry->pageno = pageno;
+ __entry->len = len;
+ __entry->curlen = curlen;
+ ),
+
+ TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->pageno, __entry->pos, __entry->len, __entry->curlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_decode_seg,
+ TP_PROTO(
+ u32 handle,
+ u32 length,
+ u64 offset
+ ),
+
+ TP_ARGS(handle, length, offset),
+
+ TP_STRUCT__entry(
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->handle = handle;
+ __entry->length = length;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%u@0x%016llx:0x%08x",
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+/**
+ ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
+ **/
+
+TRACE_EVENT(xprtrdma_allocate,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ __field(size_t, callsize)
+ __field(size_t, rcvsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req ? req->rl_reply : NULL;
+ __entry->callsize = task->tk_rqstp->rq_callsize;
+ __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep,
+ __entry->callsize, __entry->rcvsize
+ )
+);
+
+TRACE_EVENT(xprtrdma_rpc_done,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req->rl_reply;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_noreps);
+
+/**
+ ** Callback events
+ **/
+
+TRACE_EVENT(xprtrdma_cb_setup,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int reqs
+ ),
+
+ TP_ARGS(r_xprt, reqs),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, reqs)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->reqs = reqs;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->reqs
+ )
+);
+
+DEFINE_CB_EVENT(xprtrdma_cb_call);
+DEFINE_CB_EVENT(xprtrdma_cb_reply);
+
+#endif /* _TRACE_RPCRDMA_H */
+
+#include <trace/define_trace.h>
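Each TRACE_EVENT()/DEFINE_EVENT() above generates a trace_<name>() hook for the transport code to call; a send path would do roughly the following (the QP and request handling are placeholders, not lifted from the patch):

    #include <rdma/ib_verbs.h>
    #include <trace/events/rpcrdma.h>

    static int my_post_send(struct ib_qp *qp, struct rpcrdma_req *req)
    {
            struct ib_send_wr *bad_wr;
            int rc;

            rc = ib_post_send(qp, &req->rl_sendctx->sc_wr, &bad_wr);
            trace_xprtrdma_post_send(req, rc);
            return rc;
    }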
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8c153f68509e..970c91a83173 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -66,7 +66,7 @@ TRACE_EVENT(rpc_connect_status,
__entry->status = status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -175,7 +175,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
),
TP_fast_assign(
- __entry->client_id = clnt->cl_clid;
+ __entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->timeout = task->tk_timeout;
__entry->runstate = task->tk_runstate;
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+ TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
@@ -390,6 +390,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
__entry->status)
);
+DEFINE_EVENT(rpc_xprt_event, xprt_timer,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
TP_ARGS(xprt, xid, status));
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 78946640fe03..135e5421f003 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -94,9 +94,9 @@ TRACE_EVENT(thermal_zone_trip,
#ifdef CONFIG_CPU_THERMAL
TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
- size_t load_len, u32 dynamic_power, u32 static_power),
+ size_t load_len, u32 dynamic_power),
- TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
+ TP_ARGS(cpus, freq, load, load_len, dynamic_power),
TP_STRUCT__entry(
__bitmask(cpumask, num_possible_cpus())
@@ -104,7 +104,6 @@ TRACE_EVENT(thermal_power_cpu_get_power,
__dynamic_array(u32, load, load_len)
__field(size_t, load_len )
__field(u32, dynamic_power )
- __field(u32, static_power )
),
TP_fast_assign(
@@ -115,13 +114,12 @@ TRACE_EVENT(thermal_power_cpu_get_power,
load_len * sizeof(*load));
__entry->load_len = load_len;
__entry->dynamic_power = dynamic_power;
- __entry->static_power = static_power;
),
- TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
+ TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
__get_bitmask(cpumask), __entry->freq,
__print_array(__get_dynamic_array(load), __entry->load_len, 4),
- __entry->dynamic_power, __entry->static_power)
+ __entry->dynamic_power)
);
TRACE_EVENT(thermal_power_cpu_limit,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 16e305e69f34..a57e4ee989d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+#define decode_clockid(type) \
+ __print_symbolic(type, \
+ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
+ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
+ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
+ { CLOCK_TAI, "CLOCK_TAI" })
+
+#define decode_hrtimer_mode(mode) \
+ __print_symbolic(mode, \
+ { HRTIMER_MODE_ABS, "ABS" }, \
+ { HRTIMER_MODE_REL, "REL" }, \
+ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
+ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
+ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
+ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
+ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+
/**
* hrtimer_init - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init,
),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
- __entry->clockid == CLOCK_REALTIME ?
- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
- __entry->mode == HRTIMER_MODE_ABS ?
- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+ decode_clockid(__entry->clockid),
+ decode_hrtimer_mode(__entry->mode))
);
/**
@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
*/
TRACE_EVENT(hrtimer_start,
- TP_PROTO(struct hrtimer *hrtimer),
+ TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
- TP_ARGS(hrtimer),
+ TP_ARGS(hrtimer, mode),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( void *, function )
__field( s64, expires )
__field( s64, softexpires )
+ __field( enum hrtimer_mode, mode )
),
TP_fast_assign(
@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
__entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
+ __entry->mode = mode;
),
- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
- __entry->hrtimer, __entry->function,
+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+ "mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
- (unsigned long long) __entry->softexpires)
+ (unsigned long long) __entry->softexpires,
+ decode_hrtimer_mode(__entry->mode))
);
/**
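/*
 * For reference, a plain C sketch (not part of the patch) of the mapping
 * decode_hrtimer_mode() performs via __print_symbolic(); it assumes the
 * HRTIMER_MODE_* constants from <linux/hrtimer.h> are in scope.
 */
static inline const char *hrtimer_mode_name(enum hrtimer_mode mode)
{
	switch (mode) {
	case HRTIMER_MODE_ABS:			return "ABS";
	case HRTIMER_MODE_REL:			return "REL";
	case HRTIMER_MODE_ABS_PINNED:		return "ABS|PINNED";
	case HRTIMER_MODE_REL_PINNED:		return "REL|PINNED";
	case HRTIMER_MODE_ABS_SOFT:		return "ABS|SOFT";
	case HRTIMER_MODE_REL_SOFT:		return "REL|SOFT";
	case HRTIMER_MODE_ABS_PINNED_SOFT:	return "ABS|PINNED|SOFT";
	case HRTIMER_MODE_REL_PINNED_SOFT:	return "REL|PINNED|SOFT";
	default:				return "UNKNOWN";
	}
}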
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index fefb3d2c3fac..639fade14b23 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -3,35 +3,49 @@
#define __ASM_GENERIC_POLL_H
/* These are specified by iBCS2 */
-#define POLLIN 0x0001
-#define POLLPRI 0x0002
-#define POLLOUT 0x0004
-#define POLLERR 0x0008
-#define POLLHUP 0x0010
-#define POLLNVAL 0x0020
+#define POLLIN (__force __poll_t)0x0001
+#define POLLPRI (__force __poll_t)0x0002
+#define POLLOUT (__force __poll_t)0x0004
+#define POLLERR (__force __poll_t)0x0008
+#define POLLHUP (__force __poll_t)0x0010
+#define POLLNVAL (__force __poll_t)0x0020
/* The rest seem to be more-or-less nonstandard. Check them! */
-#define POLLRDNORM 0x0040
-#define POLLRDBAND 0x0080
+#define POLLRDNORM (__force __poll_t)0x0040
+#define POLLRDBAND (__force __poll_t)0x0080
#ifndef POLLWRNORM
-#define POLLWRNORM 0x0100
+#define POLLWRNORM (__force __poll_t)0x0100
#endif
#ifndef POLLWRBAND
-#define POLLWRBAND 0x0200
+#define POLLWRBAND (__force __poll_t)0x0200
#endif
#ifndef POLLMSG
-#define POLLMSG 0x0400
+#define POLLMSG (__force __poll_t)0x0400
#endif
#ifndef POLLREMOVE
-#define POLLREMOVE 0x1000
+#define POLLREMOVE (__force __poll_t)0x1000
#endif
#ifndef POLLRDHUP
-#define POLLRDHUP 0x2000
+#define POLLRDHUP (__force __poll_t)0x2000
#endif
-#define POLLFREE 0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
-#define POLL_BUSY_LOOP 0x8000
+#define POLL_BUSY_LOOP (__force __poll_t)0x8000
+
+#ifdef __KERNEL__
+#ifndef __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ return (__force __u16)val;
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ return (__force __poll_t)v;
+}
+#endif
+#endif
struct pollfd {
int fd;
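/*
 * Kernel-side usage sketch (not from this patch): a driver's ->poll()
 * builds a __poll_t mask from the typed POLL* constants above, and
 * mangle_poll()/demangle_poll() convert between that mask and the plain
 * 16-bit events/revents words of struct pollfd.
 */
static inline __u16 report_readable(void)
{
	__poll_t mask = POLLIN | POLLRDNORM;	/* driver-computed mask */

	return mangle_poll(mask);	/* value copied to pollfd.revents */
}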
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index e447283b8f52..254afc31e3be 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -23,10 +23,6 @@ typedef union sigval {
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
-#ifndef __ARCH_SI_UID_T
-#define __ARCH_SI_UID_T __kernel_uid32_t
-#endif
-
/*
* The default "si_band" type is "long", as specified by POSIX.
* However, some architectures want to override this to "int"
@@ -44,12 +40,15 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
-#ifndef HAVE_ARCH_SIGINFO_T
-
typedef struct siginfo {
int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
int si_errno;
int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
union {
int _pad[SI_PAD_SIZE];
@@ -57,14 +56,13 @@ typedef struct siginfo {
/* kill() */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
@@ -72,34 +70,47 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
__kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
int _status; /* exit code */
__ARCH_SI_CLOCK_T _utime;
__ARCH_SI_CLOCK_T _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
- short _addr_lsb; /* LSB of the reported address */
+#ifdef __ia64__
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see ia64 si_flags */
+ unsigned long _isr; /* isr */
+#endif
union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * si_code=BUS_MCEERR_AO
+ */
+ short _addr_lsb; /* LSB of the reported address */
/* used when si_code=SEGV_BNDERR */
struct {
+ short _dummy_bnd;
void __user *_lower;
void __user *_upper;
} _addr_bnd;
/* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
+ struct {
+ short _dummy_pkey;
+ __u32 _pkey;
+ } _addr_pkey;
};
} _sigfault;
@@ -118,10 +129,6 @@ typedef struct siginfo {
} _sifields;
} __ARCH_SI_ATTRIBUTES siginfo_t;
-/* If the arch shares siginfo, then it has SIGSYS. */
-#define __ARCH_SIGSYS
-#endif
-
/*
* How these fields are to be accessed.
*/
@@ -143,14 +150,12 @@ typedef struct siginfo {
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
-#define si_pkey _sifields._sigfault._pkey
+#define si_pkey _sifields._sigfault._addr_pkey._pkey
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
-#ifdef __ARCH_SIGSYS
#define si_call_addr _sifields._sigsys._call_addr
#define si_syscall _sifields._sigsys._syscall
#define si_arch _sifields._sigsys._arch
-#endif
/*
* si_code values
@@ -165,6 +170,7 @@ typedef struct siginfo {
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+#define SI_ASYNCNL -60 /* sent by glibc async name lookup completion */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
@@ -173,14 +179,34 @@ typedef struct siginfo {
* SIGILL si_codes
*/
#define ILL_ILLOPC 1 /* illegal opcode */
+#ifdef __bfin__
+# define ILL_ILLPARAOP 2 /* illegal opcode combine */
+#endif
#define ILL_ILLOPN 2 /* illegal operand */
#define ILL_ILLADR 3 /* illegal addressing mode */
#define ILL_ILLTRP 4 /* illegal trap */
+#ifdef __bfin__
+# define ILL_ILLEXCPT 4 /* unrecoverable exception */
+#endif
#define ILL_PRVOPC 5 /* privileged opcode */
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
-#define NSIGILL 8
+#ifdef __bfin__
+# define ILL_CPLB_VI 9 /* D/I CPLB protect violation */
+# define ILL_CPLB_MISS 10 /* D/I CPLB miss */
+# define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit */
+#endif
+#ifdef __tile__
+# define ILL_DBLFLT 9 /* double fault */
+# define ILL_HARDWALL 10 /* user networks hardwall violation */
+#endif
+#ifdef __ia64__
+# define ILL_BADIADDR 9 /* unimplemented instruction address */
+# define __ILL_BREAK 10 /* illegal break */
+# define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
+#endif
+#define NSIGILL 11
/*
* SIGFPE si_codes
@@ -193,15 +219,33 @@ typedef struct siginfo {
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
-#define NSIGFPE 8
+#ifdef __frv__
+# define FPE_MDAOVF 9 /* media overflow */
+#endif
+#ifdef __ia64__
+# define __FPE_DECOVF 9 /* decimal overflow */
+# define __FPE_DECDIV 10 /* decimal division by zero */
+# define __FPE_DECERR 11 /* packed decimal error */
+# define __FPE_INVASC 12 /* invalid ASCII digit */
+# define __FPE_INVDEC 13 /* invalid decimal digit */
+#endif
+#define NSIGFPE 13
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
-#define SEGV_BNDERR 3 /* failed address bound checks */
-#define SEGV_PKUERR 4 /* failed protection key checks */
+#ifdef __bfin__
+# define SEGV_STACKFLOW 3 /* stack overflow */
+#else
+# define SEGV_BNDERR 3 /* failed address bound checks */
+#endif
+#ifdef __ia64__
+# define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
+#else
+# define SEGV_PKUERR 4 /* failed protection key checks */
+#endif
#define NSIGSEGV 4
/*
@@ -210,8 +254,12 @@ typedef struct siginfo {
#define BUS_ADRALN 1 /* invalid address alignment */
#define BUS_ADRERR 2 /* non-existent physical address */
#define BUS_OBJERR 3 /* object specific hardware error */
+#ifdef __bfin__
+# define BUS_OPFETCH 4 /* error from instruction fetch */
+#else
/* hardware memory error consumed on a machine check: action required */
-#define BUS_MCEERR_AR 4
+# define BUS_MCEERR_AR 4
+#endif
/* hardware memory error detected in process but not consumed: action optional*/
#define BUS_MCEERR_AO 5
#define NSIGBUS 5
@@ -223,9 +271,20 @@ typedef struct siginfo {
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
+#ifdef __bfin__
+# define TRAP_STEP 1 /* single-step breakpoint */
+# define TRAP_TRACEFLOW 2 /* trace buffer overflow */
+# define TRAP_WATCHPT 3 /* watchpoint match */
+# define TRAP_ILLTRAP 4 /* illegal trap */
+#endif
#define NSIGTRAP 4
/*
+ * There are an additional set of SIGTRAP si_codes used by ptrace
+ * that are of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
+ */
+
+/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
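/*
 * Userspace sketch (not from this patch): reading the protection key out
 * of a pkey-triggered SIGSEGV through the relocated si_pkey accessor.
 * Assumes SA_SIGINFO delivery and a libc whose siginfo_t exposes si_pkey.
 */
#include <signal.h>

static volatile sig_atomic_t faulting_pkey = -1;

static void segv_handler(int sig, siginfo_t *si, void *ucontext)
{
	if (si->si_code == SEGV_PKUERR)
		faulting_pkey = si->si_pkey;	/* _sigfault._addr_pkey._pkey */
}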
diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h
new file mode 100644
index 000000000000..af0630ba5437
--- /dev/null
+++ b/include/uapi/linux/arm_sdei.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017 Arm Ltd. */
+#ifndef _UAPI_LINUX_ARM_SDEI_H
+#define _UAPI_LINUX_ARM_SDEI_H
+
+#define SDEI_1_0_FN_BASE 0xC4000020
+#define SDEI_1_0_MASK 0xFFFFFFE0
+#define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n))
+
+#define SDEI_1_0_FN_SDEI_VERSION SDEI_1_0_FN(0x00)
+#define SDEI_1_0_FN_SDEI_EVENT_REGISTER SDEI_1_0_FN(0x01)
+#define SDEI_1_0_FN_SDEI_EVENT_ENABLE SDEI_1_0_FN(0x02)
+#define SDEI_1_0_FN_SDEI_EVENT_DISABLE SDEI_1_0_FN(0x03)
+#define SDEI_1_0_FN_SDEI_EVENT_CONTEXT SDEI_1_0_FN(0x04)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE SDEI_1_0_FN(0x05)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME SDEI_1_0_FN(0x06)
+#define SDEI_1_0_FN_SDEI_EVENT_UNREGISTER SDEI_1_0_FN(0x07)
+#define SDEI_1_0_FN_SDEI_EVENT_STATUS SDEI_1_0_FN(0x08)
+#define SDEI_1_0_FN_SDEI_EVENT_GET_INFO SDEI_1_0_FN(0x09)
+#define SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET SDEI_1_0_FN(0x0A)
+#define SDEI_1_0_FN_SDEI_PE_MASK SDEI_1_0_FN(0x0B)
+#define SDEI_1_0_FN_SDEI_PE_UNMASK SDEI_1_0_FN(0x0C)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_BIND SDEI_1_0_FN(0x0D)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E)
+#define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11)
+#define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12)
+
+#define SDEI_VERSION_MAJOR_SHIFT 48
+#define SDEI_VERSION_MAJOR_MASK 0x7fff
+#define SDEI_VERSION_MINOR_SHIFT 32
+#define SDEI_VERSION_MINOR_MASK 0xffff
+#define SDEI_VERSION_VENDOR_SHIFT 0
+#define SDEI_VERSION_VENDOR_MASK 0xffffffff
+
+#define SDEI_VERSION_MAJOR(x) (x>>SDEI_VERSION_MAJOR_SHIFT & SDEI_VERSION_MAJOR_MASK)
+#define SDEI_VERSION_MINOR(x) (x>>SDEI_VERSION_MINOR_SHIFT & SDEI_VERSION_MINOR_MASK)
+#define SDEI_VERSION_VENDOR(x) (x>>SDEI_VERSION_VENDOR_SHIFT & SDEI_VERSION_VENDOR_MASK)
+
+/* SDEI return values */
+#define SDEI_SUCCESS 0
+#define SDEI_NOT_SUPPORTED -1
+#define SDEI_INVALID_PARAMETERS -2
+#define SDEI_DENIED -3
+#define SDEI_PENDING -5
+#define SDEI_OUT_OF_RESOURCE -10
+
+/* EVENT_REGISTER flags */
+#define SDEI_EVENT_REGISTER_RM_ANY 0
+#define SDEI_EVENT_REGISTER_RM_PE 1
+
+/* EVENT_STATUS return value bits */
+#define SDEI_EVENT_STATUS_RUNNING 2
+#define SDEI_EVENT_STATUS_ENABLED 1
+#define SDEI_EVENT_STATUS_REGISTERED 0
+
+/* EVENT_COMPLETE status values */
+#define SDEI_EV_HANDLED 0
+#define SDEI_EV_FAILED 1
+
+/* GET_INFO values */
+#define SDEI_EVENT_INFO_EV_TYPE 0
+#define SDEI_EVENT_INFO_EV_SIGNALED 1
+#define SDEI_EVENT_INFO_EV_PRIORITY 2
+#define SDEI_EVENT_INFO_EV_ROUTING_MODE 3
+#define SDEI_EVENT_INFO_EV_ROUTING_AFF 4
+
+/* and their results */
+#define SDEI_EVENT_TYPE_PRIVATE 0
+#define SDEI_EVENT_TYPE_SHARED 1
+#define SDEI_EVENT_PRIORITY_NORMAL 0
+#define SDEI_EVENT_PRIORITY_CRITICAL 1
+
+#endif /* _UAPI_LINUX_ARM_SDEI_H */
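/*
 * Sketch (not from this patch): unpacking the 64-bit value returned by the
 * SDEI_1_0_FN_SDEI_VERSION call with the shift/mask macros above; assumes
 * <stdio.h> and this header are included.
 */
static void show_sdei_version(unsigned long long ver)
{
	printf("SDEIv%llu.%llu (vendor 0x%llx)\n",
	       SDEI_VERSION_MAJOR(ver), SDEI_VERSION_MINOR(ver),
	       SDEI_VERSION_VENDOR(ver));
}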
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ce615b75e855..c8d99b9ca550 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -33,7 +33,12 @@ struct btrfs_ioctl_vol_args {
char name[BTRFS_PATH_NAME_MAX + 1];
};
-#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_SUBVOL_NAME_MAX 4039
+
+#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
@@ -101,11 +106,7 @@ struct btrfs_ioctl_qgroup_limit_args {
* - BTRFS_IOC_SUBVOL_GETFLAGS
* - BTRFS_IOC_SUBVOL_SETFLAGS
*/
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
-#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
-#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
-#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 6d6e5da51527..aff1356c2bb8 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -456,6 +456,8 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
+#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
/*
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 4199f8acbce5..d2a8313fabd7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO, return -EAGAIN if operation would block */
#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
+/* per-IO O_APPEND */
+#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+
/* mask of flags supported by the kernel */
-#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
+#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+ RWF_APPEND)
#endif /* _UAPI_LINUX_FS_H */
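/*
 * Sketch (not from this patch): using the new per-IO append flag through
 * pwritev2(); assumes a libc that exposes pwritev2() and RWF_APPEND, and a
 * kernel that accepts the flag.  Error handling omitted.
 */
#define _GNU_SOURCE
#include <sys/uio.h>

static ssize_t append_record(int fd, const void *buf, size_t len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	/* the data is appended at end-of-file for this call only */
	return pwritev2(fd, &iov, 1, 0, RWF_APPEND);
}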
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 5156bad77b47..2dc10a034de1 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -187,10 +187,19 @@ struct gfs2_rgrp {
__be32 rg_flags;
__be32 rg_free;
__be32 rg_dinodes;
- __be32 __pad;
+ union {
+ __be32 __pad;
+ __be32 rg_skip; /* Distance to the next rgrp in fs blocks */
+ };
__be64 rg_igeneration;
-
- __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
+ /* The following 3 fields are duplicated from gfs2_rindex to reduce
+ reliance on the rindex */
+ __be64 rg_data0; /* First data location */
+ __be32 rg_data; /* Number of data blocks in rgrp */
+ __be32 rg_bitbytes; /* Number of bytes in data bitmaps */
+ __be32 rg_crc; /* crc32 of the structure with this field 0 */
+
+ __u8 rg_reserved[60]; /* Several fields from gfs1 now reserved */
};
/*
@@ -394,7 +403,36 @@ struct gfs2_ea_header {
* Log header structure
*/
-#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_FLUSH_NORMAL 0x00000002 /* normal log flush */
+#define GFS2_LOG_HEAD_FLUSH_SYNC 0x00000004 /* Sync log flush */
+#define GFS2_LOG_HEAD_FLUSH_SHUTDOWN 0x00000008 /* Shutdown log flush */
+#define GFS2_LOG_HEAD_FLUSH_FREEZE 0x00000010 /* Freeze flush */
+#define GFS2_LOG_HEAD_RECOVERY 0x00000020 /* Journal recovery */
+#define GFS2_LOG_HEAD_USERSPACE 0x80000000 /* Written by gfs2-utils */
+
+/* Log flush callers */
+#define GFS2_LFC_SHUTDOWN 0x00000100
+#define GFS2_LFC_JDATA_WPAGES 0x00000200
+#define GFS2_LFC_SET_FLAGS 0x00000400
+#define GFS2_LFC_AIL_EMPTY_GL 0x00000800
+#define GFS2_LFC_AIL_FLUSH 0x00001000
+#define GFS2_LFC_RGRP_GO_SYNC 0x00002000
+#define GFS2_LFC_INODE_GO_SYNC 0x00004000
+#define GFS2_LFC_INODE_GO_INVAL 0x00008000
+#define GFS2_LFC_FREEZE_GO_SYNC 0x00010000
+#define GFS2_LFC_KILL_SB 0x00020000
+#define GFS2_LFC_DO_SYNC 0x00040000
+#define GFS2_LFC_INPLACE_RESERVE 0x00080000
+#define GFS2_LFC_WRITE_INODE 0x00100000
+#define GFS2_LFC_MAKE_FS_RO 0x00200000
+#define GFS2_LFC_SYNC_FS 0x00400000
+#define GFS2_LFC_EVICT_INODE 0x00800000
+#define GFS2_LFC_TRANS_END 0x01000000
+#define GFS2_LFC_LOGD_JFLUSH_REQD 0x02000000
+#define GFS2_LFC_LOGD_AIL_FLUSH_REQD 0x04000000
+
+#define LH_V1_SIZE (offsetofend(struct gfs2_log_header, lh_hash))
struct gfs2_log_header {
struct gfs2_meta_header lh_header;
@@ -403,7 +441,21 @@ struct gfs2_log_header {
__be32 lh_flags; /* GFS2_LOG_HEAD_... */
__be32 lh_tail; /* Block number of log tail */
__be32 lh_blkno;
- __be32 lh_hash;
+ __be32 lh_hash; /* crc up to here with this field 0 */
+
+ /* Version 2 additional fields start here */
+ __be32 lh_crc; /* crc32c from lh_nsec to end of block */
+ __be32 lh_nsec; /* Nanoseconds of timestamp */
+ __be64 lh_sec; /* Seconds of timestamp */
+ __be64 lh_addr; /* Block addr of this log header (absolute) */
+ __be64 lh_jinode; /* Journal inode number */
+ __be64 lh_statfs_addr; /* Local statfs inode number */
+ __be64 lh_quota_addr; /* Local quota change inode number */
+
+ /* Statfs local changes (i.e. diff from global statfs) */
+ __be64 lh_local_total;
+ __be64 lh_local_free;
+ __be64 lh_local_dinodes;
};
/*
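/*
 * Sketch (not from this patch): per the field comments above, lh_crc is a
 * crc32c taken from lh_nsec to the end of the log block, i.e. starting
 * LH_V1_SIZE + 4 bytes into the header.  The seed value and the crc32c()
 * helper used here are assumptions, not taken from this header.
 */
static __u32 lh_v2_crc(const struct gfs2_log_header *lh, unsigned int bsize)
{
	const char *p = (const char *)lh;

	return crc32c(~0, p + LH_V1_SIZE + 4, bsize - LH_V1_SIZE - 4);
}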
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 42d1a434af29..f9a1be7fc696 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -75,14 +75,23 @@ struct nvm_ioctl_create_simple {
__u32 lun_end;
};
+struct nvm_ioctl_create_extended {
+ __u16 lun_begin;
+ __u16 lun_end;
+ __u16 op;
+ __u16 rsv;
+};
+
enum {
NVM_CONFIG_TYPE_SIMPLE = 0,
+ NVM_CONFIG_TYPE_EXTENDED = 1,
};
struct nvm_ioctl_create_conf {
__u32 type;
union {
struct nvm_ioctl_create_simple s;
+ struct nvm_ioctl_create_extended e;
};
};
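/*
 * Sketch (not from this patch): filling the new extended create config.
 * The interpretation of 'op' as an over-provisioning value is an
 * assumption; all numbers are illustrative only.
 */
static const struct nvm_ioctl_create_conf conf = {
	.type = NVM_CONFIG_TYPE_EXTENDED,
	.e = {
		.lun_begin = 0,
		.lun_end   = 7,
		.op        = 11,	/* illustrative */
		.rsv       = 0,
	},
};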
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 057d22a48416..946cb62d64b0 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -12,6 +12,7 @@
#define NFS_PROGRAM 100003
#define NFS_PORT 2049
+#define NFS_RDMA_PORT 20049
#define NFS_MAXDATA 8192
#define NFS_MAXPATHLEN 1024
#define NFS_MAXNAMLEN 255
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index f3776cc80f4d..48031e7858f1 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -221,27 +221,4 @@ enum nubus_display_res_id {
NUBUS_RESID_SIXTHMODE = 0x0085
};
-struct nubus_dir
-{
- unsigned char *base;
- unsigned char *ptr;
- int done;
- int mask;
-};
-
-struct nubus_dirent
-{
- unsigned char *base;
- unsigned char type;
- __u32 data; /* Actually 24bits used */
- int mask;
-};
-
-
-/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
-static inline void *nubus_slot_addr(int slot)
-{
- return (void *)(0xF0000000|(slot<<24));
-}
-
#endif /* _UAPILINUX_NUBUS_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 769533696483..e0739a1aa4b2 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -634,9 +634,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * Following PERF_RECORD_MISC_* are used on different
+ * events, so can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -886,6 +889,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e3939e00980b..e46d82b91166 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -66,6 +66,12 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_SETSIGMASK 0x420b
#define PTRACE_SECCOMP_GET_FILTER 0x420c
+#define PTRACE_SECCOMP_GET_METADATA 0x420d
+
+struct seccomp_metadata {
+ unsigned long filter_off; /* Input: which filter */
+ unsigned int flags; /* Output: filter's flags */
+};
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
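/*
 * Sketch (not from this patch): querying the first seccomp filter of an
 * attached, stopped tracee.  Passing the struct size in ptrace()'s 'addr'
 * slot is an assumption about the new request's calling convention, and
 * PTRACE_SECCOMP_GET_METADATA is assumed visible via this header.
 */
#include <sys/ptrace.h>

static long get_first_filter_flags(pid_t pid)
{
	struct seccomp_metadata md = { .filter_off = 0 };	/* first filter */

	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) == -1)
		return -1;

	return md.flags;
}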
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 30a9e51bbb1e..22627f80063e 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -49,5 +49,10 @@
*/
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
+#define SCHED_FLAG_DL_OVERRUN 0x04
+
+#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
+ SCHED_FLAG_RECLAIM | \
+ SCHED_FLAG_DL_OVERRUN)
#endif /* _UAPI_LINUX_SCHED_H */
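/*
 * Sketch (not from this patch): requesting SCHED_FLAG_DL_OVERRUN for a
 * SCHED_DEADLINE thread.  sched_setattr() has no glibc wrapper, so the raw
 * syscall is used; the struct sched_attr layout (assumed to come from
 * <linux/sched/types.h>), the runtime/deadline/period numbers and the
 * overrun-notification semantics are assumptions, not taken from this diff.
 */
#include <unistd.h>
#include <sys/syscall.h>

static long enable_dl_overrun_notification(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = SCHED_DEADLINE,
		.sched_flags    = SCHED_FLAG_DL_OVERRUN,
		.sched_runtime  =  10 * 1000 * 1000,	/* 10 ms */
		.sched_deadline =  30 * 1000 * 1000,	/* 30 ms */
		.sched_period   = 100 * 1000 * 1000,	/* 100 ms */
	};

	return syscall(SYS_sched_setattr, 0, &attr, 0);	/* 0 = this thread */
}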
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index e3d1d0c78f3c..cd4f0b897a48 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -49,5 +49,11 @@ typedef __u32 __bitwise __wsum;
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
+#ifdef __CHECK_POLL
+typedef unsigned __bitwise __poll_t;
+#else
+typedef unsigned __poll_t;
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 398a514ee446..db54115be044 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -82,6 +82,15 @@ struct bnxt_re_qp_resp {
__u32 rsvd;
};
+struct bnxt_re_srq_req {
+ __u64 srqva;
+ __u64 srq_handle;
+};
+
+struct bnxt_re_srq_resp {
+ __u32 srqid;
+};
+
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7e11bb8651b6..04d0e67b1312 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -402,13 +402,18 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+enum ib_uverbs_ex_create_cq_flags {
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
+};
+
struct ib_uverbs_ex_create_cq {
__u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 comp_mask;
- __u32 flags;
+ __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
__u32 reserved;
};
@@ -449,7 +454,7 @@ struct ib_uverbs_wc {
__u32 vendor_err;
__u32 byte_len;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
__u32 qp_num;
@@ -765,7 +770,7 @@ struct ib_uverbs_send_wr {
__u32 opcode;
__u32 send_flags;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
union {
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 224b52b6279c..7f9c37346613 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -97,8 +97,8 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask;
- __u8 rx_hash_function;
+ __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
__u32 comp_mask;
@@ -152,7 +152,8 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
+ MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a33e0517d3fd..1111aa4e7c1e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -41,6 +41,9 @@ enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
+ MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
+ MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
+ MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
};
enum {
@@ -121,10 +124,12 @@ struct mlx5_ib_alloc_ucontext_resp {
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
- __u8 reserved2;
+ __u8 clock_info_versions;
__u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
+ __u32 num_dyn_bfregs;
+ __u32 reserved3;
};
struct mlx5_ib_alloc_pd_resp {
@@ -280,8 +285,11 @@ struct mlx5_ib_create_qp {
__u32 rq_wqe_shift;
__u32 flags;
__u32 uidx;
- __u32 reserved0;
- __u64 sq_buf_addr;
+ __u32 bfreg_index;
+ union {
+ __u64 sq_buf_addr;
+ __u64 access_key;
+ };
};
/* RX Hash function flags */
@@ -307,7 +315,7 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
/* Save bits for future fields */
- MLX5_RX_HASH_INNER = 1 << 31
+ MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
@@ -354,6 +362,11 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_modify_qp_resp {
+ __u32 response_length;
+ __u32 dctn;
+};
+
struct mlx5_ib_create_wq_resp {
__u32 response_length;
__u32 reserved;
@@ -368,4 +381,36 @@ struct mlx5_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
+
+struct mlx5_ib_clock_info {
+ __u32 sign;
+ __u32 resv;
+ __u64 nsec;
+ __u64 cycles;
+ __u64 frac;
+ __u32 mult;
+ __u32 shift;
+ __u64 mask;
+ __u64 overflow_period;
+};
+
+enum mlx5_ib_mmap_cmd {
+ MLX5_IB_MMAP_REGULAR_PAGE = 0,
+ MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
+ /* 5 is chosen in order to be compatible with old versions of libmlx5 */
+ MLX5_IB_MMAP_CORE_CLOCK = 5,
+ MLX5_IB_MMAP_ALLOC_WC = 6,
+ MLX5_IB_MMAP_CLOCK_INFO = 7,
+};
+
+enum {
+ MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+
+/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+ MLX5_IB_CLOCK_INFO_V1 = 0,
+};
#endif /* MLX5_ABI_USER_H */
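/*
 * Sketch (not from this patch): a simplified reader for the mmap()ed
 * MLX5_IB_MMAP_CLOCK_INFO page.  The retry protocol is inferred from the
 * MLX5_IB_CLOCK_INFO_KERNEL_UPDATING bit; a fully robust reader would also
 * re-check ->sign after the copy, and the cycles-to-nanoseconds conversion
 * is intentionally omitted here.
 */
static struct mlx5_ib_clock_info
snapshot_clock_info(const volatile struct mlx5_ib_clock_info *ci)
{
	struct mlx5_ib_clock_info snap;

	do {
		snap = *ci;
	} while (snap.sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	return snap;
}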
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index cc002e316d09..17e59bec169e 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -236,6 +236,10 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_PORT_NEW,
RDMA_NLDEV_CMD_PORT_DEL,
+ RDMA_NLDEV_CMD_RES_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -303,6 +307,51 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_SUMMARY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_QP, /* nested table */
+ RDMA_NLDEV_ATTR_RES_QP_ENTRY, /* nested table */
+ /*
+ * Local QPN
+ */
+ RDMA_NLDEV_ATTR_RES_LQPN, /* u32 */
+ /*
+ * Remote QPN,
+ * Applicable for RC and UC only (IBTA 11.2.5.3, QUERY QUEUE PAIR)
+ */
+ RDMA_NLDEV_ATTR_RES_RQPN, /* u32 */
+ /*
+ * Receive Queue PSN,
+ * Applicable for RC and UC only (IBTA 11.2.5.3, QUERY QUEUE PAIR)
+ */
+ RDMA_NLDEV_ATTR_RES_RQ_PSN, /* u32 */
+ /*
+ * Send Queue PSN
+ */
+ RDMA_NLDEV_ATTR_RES_SQ_PSN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, /* u8 */
+ /*
+ * QP types as visible to RDMA/core; the reserved QP types
+ * are not exported through this interface.
+ */
+ RDMA_NLDEV_ATTR_RES_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_STATE, /* u8 */
+ /*
+ * Process ID of the process that created the object;
+ * for objects of kernel origin, no PID is reported.
+ */
+ RDMA_NLDEV_ATTR_RES_PID, /* u32 */
+ /*
+ * The name of the process that created the resource.
+ * It is reported only for kernel objects; for user-created
+ * objects, the reader is expected to consult the
+ * /proc/PID/comm file instead.
+ */
+ RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index aaa352f2f110..02ca0d0f1eb7 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -52,12 +52,14 @@
#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
+#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
+#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
+#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
+#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
+#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
enum pvrdma_wr_opcode {
PVRDMA_WR_RDMA_WRITE,
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index c227ccba60ae..07d61583fd02 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -214,6 +214,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
+#define SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */
+/* gap in the numbering for a future standard linear format */
#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
@@ -248,6 +253,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_LE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
@@ -259,6 +266,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_BE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_BE
#endif
typedef int __bitwise snd_pcm_subformat_t;
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 326054a72bc7..8ba0112e5336 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -222,6 +222,17 @@
* %SKL_TKN_MM_U32_NUM_IN_FMT: Number of input formats
* %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
*
+ * %SKL_TKN_U32_ASTATE_IDX: Table Index for the A-State entry to be filled
+ * with kcps and clock source
+ *
+ * %SKL_TKN_U32_ASTATE_COUNT: Number of valid entries in A-State table
+ *
+ * %SKL_TKN_U32_ASTATE_KCPS: Specifies the core load threshold (in kilo
+ * cycles per second) below which the DSP is clocked
+ * from the source given by %SKL_TKN_U32_ASTATE_CLK_SRC.
+ *
+ * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
+ *
* module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*
@@ -309,7 +320,11 @@ enum SKL_TKNS {
SKL_TKN_MM_U32_NUM_IN_FMT,
SKL_TKN_MM_U32_NUM_OUT_FMT,
- SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
+ SKL_TKN_U32_ASTATE_IDX,
+ SKL_TKN_U32_ASTATE_COUNT,
+ SKL_TKN_U32_ASTATE_KCPS,
+ SKL_TKN_U32_ASTATE_CLK_SRC,
+ SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
};
#endif