Diffstat (limited to 'include/linux')
95 files changed, 3343 insertions, 463 deletions
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 8cced632ecd0..edcee9f5335a 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -70,8 +70,10 @@ struct amd_iommu *get_amd_iommu(unsigned int idx); #ifdef CONFIG_KVM_AMD_SEV int amd_iommu_snp_disable(void); +extern bool amd_iommu_sev_tio_supported(void); #else static inline int amd_iommu_snp_disable(void) { return 0; } +static inline bool amd_iommu_sev_tio_supported(void) { return false; } #endif #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/annotate.h b/include/linux/annotate.h index 7c10d34d198c..2f1599c9e573 100644 --- a/include/linux/annotate.h +++ b/include/linux/annotate.h @@ -6,41 +6,34 @@ #ifdef CONFIG_OBJTOOL -#ifndef __ASSEMBLY__ - #define __ASM_ANNOTATE(section, label, type) \ - ".pushsection " section ",\"M\", @progbits, 8\n\t" \ - ".long " __stringify(label) " - .\n\t" \ - ".long " __stringify(type) "\n\t" \ - ".popsection\n\t" + .pushsection section, "M", @progbits, 8; \ + .long label - ., type; \ + .popsection + +#ifndef __ASSEMBLY__ #define ASM_ANNOTATE_LABEL(label, type) \ - __ASM_ANNOTATE(".discard.annotate_insn", label, type) + __stringify(__ASM_ANNOTATE(.discard.annotate_insn, label, type)) #define ASM_ANNOTATE(type) \ - "911:\n\t" \ - ASM_ANNOTATE_LABEL(911b, type) + "911: " \ + __stringify(__ASM_ANNOTATE(.discard.annotate_insn, 911b, type)) #define ASM_ANNOTATE_DATA(type) \ - "912:\n\t" \ - __ASM_ANNOTATE(".discard.annotate_data", 912b, type) + "912: " \ + __stringify(__ASM_ANNOTATE(.discard.annotate_data, 912b, type)) #else /* __ASSEMBLY__ */ -.macro __ANNOTATE section, type -.Lhere_\@: - .pushsection \section, "M", @progbits, 8 - .long .Lhere_\@ - . - .long \type - .popsection -.endm - .macro ANNOTATE type - __ANNOTATE ".discard.annotate_insn", \type +.Lhere_\@: + __ASM_ANNOTATE(.discard.annotate_insn, .Lhere_\@, \type) .endm .macro ANNOTATE_DATA type - __ANNOTATE ".discard.annotate_data", \type +.Lhere_\@: + __ASM_ANNOTATE(.discard.annotate_data, .Lhere_\@, \type) .endm #endif /* __ASSEMBLY__ */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 0c2a8b846c20..ebd7f8935f96 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -80,6 +80,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS]; #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) #define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling) #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) + +#ifndef arch_cpu_is_threaded +#define arch_cpu_is_threaded() (0) +#endif + void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); diff --git a/include/linux/ata.h b/include/linux/ata.h index c9013e472aa3..54b416e26995 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -29,6 +29,7 @@ enum { ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_8191 = 8191, ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */ ATA_MAX_SECTORS_TAPE = 65535, ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */ diff --git a/include/linux/base64.h b/include/linux/base64.h index 660d4cb1ef31..a2c6c9222da3 100644 --- a/include/linux/base64.h +++ b/include/linux/base64.h @@ -8,9 +8,15 @@ #include <linux/types.h> +enum base64_variant { + BASE64_STD, /* RFC 4648 (standard) */ + BASE64_URLSAFE, /* RFC 4648 (base64url) */ + BASE64_IMAP, /* RFC 3501 */ +}; + #define 
BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3) -int base64_encode(const u8 *src, int len, char *dst); -int base64_decode(const char *src, int len, u8 *dst); +int base64_encode(const u8 *src, int len, char *dst, bool padding, enum base64_variant variant); +int base64_decode(const char *src, int len, u8 *dst, bool padding, enum base64_variant variant); #endif /* _LINUX_BASE64_H */ diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 5355f8f806a9..126dc5b380af 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -17,6 +17,7 @@ * FIELD_{GET,PREP} macros take as first parameter shifted mask * from which they extract the base mask and shift amount. * Mask must be a compilation time constant. + * field_{get,prep} are variants that take a non-const mask. * * Example: * @@ -60,7 +61,7 @@ #define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x)) -#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ +#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx) \ ({ \ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ _pfx "mask is not constant"); \ @@ -69,13 +70,33 @@ ~((_mask) >> __bf_shf(_mask)) & \ (0 + (_val)) : 0, \ _pfx "value too large for the field"); \ - BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \ - __bf_cast_unsigned(_reg, ~0ull), \ - _pfx "type of reg too small for mask"); \ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ (1ULL << __bf_shf(_mask))); \ }) +#define __BF_FIELD_CHECK_REG(mask, reg, pfx) \ + BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) > \ + __bf_cast_unsigned(reg, ~0ull), \ + pfx "type of reg too small for mask") + +#define __BF_FIELD_CHECK(mask, reg, val, pfx) \ + ({ \ + __BF_FIELD_CHECK_MASK(mask, val, pfx); \ + __BF_FIELD_CHECK_REG(mask, reg, pfx); \ + }) + +#define __FIELD_PREP(mask, val, pfx) \ + ({ \ + __BF_FIELD_CHECK_MASK(mask, val, pfx); \ + ((typeof(mask))(val) << __bf_shf(mask)) & (mask); \ + }) + +#define __FIELD_GET(mask, reg, pfx) \ + ({ \ + __BF_FIELD_CHECK_MASK(mask, 0U, pfx); \ + (typeof(mask))(((reg) & (mask)) >> __bf_shf(mask)); \ + }) + /** * FIELD_MAX() - produce the maximum value representable by a field * @_mask: shifted mask defining the field's length and position @@ -112,8 +133,8 @@ */ #define FIELD_PREP(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ - ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + __BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: "); \ + __FIELD_PREP(_mask, _val, "FIELD_PREP: "); \ }) #define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0) @@ -152,8 +173,8 @@ */ #define FIELD_GET(_mask, _reg) \ ({ \ - __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ - (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + __BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: "); \ + __FIELD_GET(_mask, _reg, "FIELD_GET: "); \ }) /** @@ -220,4 +241,62 @@ __MAKE_OP(64) #undef __MAKE_OP #undef ____MAKE_OP +#define __field_prep(mask, val) \ + ({ \ + __auto_type __mask = (mask); \ + typeof(__mask) __val = (val); \ + unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \ + __ffs(__mask) : __ffs64(__mask); \ + (__val << __shift) & __mask; \ + }) + +#define __field_get(mask, reg) \ + ({ \ + __auto_type __mask = (mask); \ + typeof(__mask) __reg = (reg); \ + unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? 
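The base64.h hunk above extends base64_encode() and base64_decode() with an explicit padding flag and an enum base64_variant selector. A minimal usage sketch under those signatures, sizing the output with BASE64_CHARS() from the same header; the decode return convention ("decoded length, negative on malformed input") is an assumption, not taken from the hunk:

#include <linux/base64.h>
#include <linux/slab.h>

/* Encode a binary key as unpadded base64url (RFC 4648), then decode it back. */
static int base64url_roundtrip(const u8 *key, int key_len)
{
	char *enc;
	u8 *dec;
	int enc_len, dec_len;

	enc = kmalloc(BASE64_CHARS(key_len), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	/* No '=' padding, URL- and filename-safe alphabet. */
	enc_len = base64_encode(key, key_len, enc, false, BASE64_URLSAFE);

	dec = kmalloc(key_len, GFP_KERNEL);
	if (!dec) {
		kfree(enc);
		return -ENOMEM;
	}

	/* Assumed convention: returns the decoded length, or a negative
	 * value on malformed input. */
	dec_len = base64_decode(enc, enc_len, dec, false, BASE64_URLSAFE);

	kfree(dec);
	kfree(enc);
	return dec_len < 0 ? dec_len : 0;
}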
\ + __ffs(__mask) : __ffs64(__mask); \ + (__reg & __mask) >> __shift; \ + }) + +/** + * field_prep() - prepare a bitfield element + * @mask: shifted mask defining the field's length and position, must be + * non-zero + * @val: value to put in the field + * + * Return: field value masked and shifted to its final destination + * + * field_prep() masks and shifts up the value. The result should be + * combined with other fields of the bitfield using logical OR. + * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant. + * Typical usage patterns are a value stored in a table, or calculated by + * shifting a constant by a variable number of bits. + * If you want to ensure that @mask is a compile-time constant, please use + * FIELD_PREP() directly instead. + */ +#define field_prep(mask, val) \ + (__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \ + : __field_prep(mask, val)) + +/** + * field_get() - extract a bitfield element + * @mask: shifted mask defining the field's length and position, must be + * non-zero + * @reg: value of entire bitfield + * + * Return: extracted field value + * + * field_get() extracts the field specified by @mask from the + * bitfield passed in as @reg by masking and shifting it down. + * Unlike FIELD_GET(), @mask is not limited to a compile-time constant. + * Typical usage patterns are a value stored in a table, or calculated by + * shifting a constant by a variable number of bits. + * If you want to ensure that @mask is a compile-time constant, please use + * FIELD_GET() directly instead. + */ +#define field_get(mask, reg) \ + (__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \ + : __field_get(mask, reg)) + #endif diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h new file mode 100644 index 000000000000..cc81c5733e31 --- /dev/null +++ b/include/linux/cache_coherency.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cache coherency maintenance operation device drivers + * + * Copyright Huawei 2025 + */ +#ifndef _LINUX_CACHE_COHERENCY_H_ +#define _LINUX_CACHE_COHERENCY_H_ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/types.h> + +struct cc_inval_params { + phys_addr_t addr; + size_t size; +}; + +struct cache_coherency_ops_inst; + +struct cache_coherency_ops { + int (*wbinv)(struct cache_coherency_ops_inst *cci, + struct cc_inval_params *invp); + int (*done)(struct cache_coherency_ops_inst *cci); +}; + +struct cache_coherency_ops_inst { + struct kref kref; + struct list_head node; + const struct cache_coherency_ops *ops; +}; + +int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci); +void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci); + +struct cache_coherency_ops_inst * +_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops, + size_t size); +/** + * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance + * @ops: Cache maintenance operations + * @drv_struct: structure that contains the struct cache_coherency_ops_inst + * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct. + * + * This allocates a driver specific structure and initializes the + * cache_coherency_ops_inst embedded in the drv_struct. Upon success the + * pointer must be freed via cache_coherency_ops_instance_put(). + * + * Returns a &drv_struct * on success, %NULL on error. 
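The bitfield.h changes above add field_prep()/field_get(), which accept a mask that is not a compile-time constant while still routing constant masks through the checked FIELD_PREP()/FIELD_GET() paths. A small illustration with a mask taken from a table; the per-lane register layout is made up for the example:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical per-lane register layout: each lane's speed field sits at a
 * different position, so the mask is only known at run time. */
static const u32 lane_speed_mask[] = {
	GENMASK(3, 0),
	GENMASK(11, 8),
	GENMASK(19, 16),
};

static u32 set_lane_speed(u32 reg, unsigned int lane, u32 speed)
{
	u32 mask = lane_speed_mask[lane];

	reg &= ~mask;
	return reg | field_prep(mask, speed);
}

static u32 get_lane_speed(u32 reg, unsigned int lane)
{
	return field_get(lane_speed_mask[lane], reg);
}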
+ */ +#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \ + ({ \ + static_assert(__same_type(struct cache_coherency_ops_inst, \ + ((drv_struct *)NULL)->member)); \ + static_assert(offsetof(drv_struct, member) == 0); \ + (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \ + sizeof(drv_struct)); \ + }) +void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci); + +#endif diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h index 79bb80e56790..b1ba97f6c9ad 100644 --- a/include/linux/cdx/cdx_bus.h +++ b/include/linux/cdx/cdx_bus.h @@ -234,7 +234,7 @@ int __must_check __cdx_driver_register(struct cdx_driver *cdx_driver, */ void cdx_driver_unregister(struct cdx_driver *cdx_driver); -extern struct bus_type cdx_bus_type; +extern const struct bus_type cdx_bus_type; /** * cdx_dev_reset - Reset CDX device diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h index 4cb0400ad616..35fdc41845ce 100644 --- a/include/linux/comedi/comedidev.h +++ b/include/linux/comedi/comedidev.h @@ -15,6 +15,7 @@ #include <linux/spinlock_types.h> #include <linux/rwsem.h> #include <linux/kref.h> +#include <linux/completion.h> #include <linux/comedi.h> #define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) @@ -272,6 +273,8 @@ struct comedi_buf_map { * @events: Bit-vector of events that have occurred. * @cmd: Details of comedi command in progress. * @wait_head: Task wait queue for file reader or writer. + * @run_complete: "run complete" completion event. + * @run_active: "run active" reference counter. * @cb_mask: Bit-vector of events that should wake waiting tasks. * @inttrig: Software trigger function for command, or NULL. * @@ -357,6 +360,8 @@ struct comedi_async { unsigned int events; struct comedi_cmd cmd; wait_queue_head_t wait_head; + struct completion run_complete; + refcount_t run_active; unsigned int cb_mask; int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int x); @@ -584,6 +589,8 @@ struct comedi_device *comedi_dev_get_from_minor(unsigned int minor); int comedi_dev_put(struct comedi_device *dev); bool comedi_is_subdevice_running(struct comedi_subdevice *s); +bool comedi_get_is_subdevice_running(struct comedi_subdevice *s); +void comedi_put_is_subdevice_running(struct comedi_subdevice *s); void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size); void comedi_set_spriv_auto_free(struct comedi_subdevice *s); diff --git a/include/linux/comedi/comedilib.h b/include/linux/comedi/comedilib.h index 0223c9cd9215..1f2b22b383cc 100644 --- a/include/linux/comedi/comedilib.h +++ b/include/linux/comedi/comedilib.h @@ -10,8 +10,38 @@ #ifndef _LINUX_COMEDILIB_H #define _LINUX_COMEDILIB_H -struct comedi_device *comedi_open(const char *path); -int comedi_close(struct comedi_device *dev); +struct comedi_device *comedi_open_from(const char *path, int from); + +/** + * comedi_open() - Open a COMEDI device from the kernel + * @filename: Fake pathname of the form "/dev/comediN". + * + * Converts @filename to a COMEDI device number and "opens" it if it exists + * and is attached to a low-level COMEDI driver. + * + * Return: A pointer to the COMEDI device on success. + * Return %NULL on failure. + */ +static inline struct comedi_device *comedi_open(const char *path) +{ + return comedi_open_from(path, -1); +} + +int comedi_close_from(struct comedi_device *dev, int from); + +/** + * comedi_close() - Close a COMEDI device from the kernel + * @dev: COMEDI device. 
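The new cache_coherency.h interface above requires the driver structure to embed struct cache_coherency_ops_inst as its first member, which the static_asserts in cache_coherency_ops_instance_alloc() enforce. A sketch of a driver using it; my_cc_drv, its register pointer and the bodies of the ops are illustrative only:

#include <linux/cache_coherency.h>
#include <linux/container_of.h>

struct my_cc_drv {
	struct cache_coherency_ops_inst cci;	/* must be the first member */
	void __iomem *regs;			/* illustrative driver state */
};

static int my_cc_wbinv(struct cache_coherency_ops_inst *cci,
		       struct cc_inval_params *invp)
{
	struct my_cc_drv *drv = container_of(cci, struct my_cc_drv, cci);

	/* issue write-back + invalidate for [invp->addr, invp->addr + invp->size) */
	return 0;
}

static int my_cc_done(struct cache_coherency_ops_inst *cci)
{
	/* wait for outstanding maintenance operations to complete */
	return 0;
}

static const struct cache_coherency_ops my_cc_ops = {
	.wbinv = my_cc_wbinv,
	.done = my_cc_done,
};

static int my_cc_probe(void)
{
	struct my_cc_drv *drv;
	int ret;

	drv = cache_coherency_ops_instance_alloc(&my_cc_ops, struct my_cc_drv, cci);
	if (!drv)
		return -ENOMEM;

	ret = cache_coherency_ops_instance_register(&drv->cci);
	if (ret)
		cache_coherency_ops_instance_put(&drv->cci);
	return ret;
}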
+ * + * Closes a COMEDI device previously opened by comedi_open(). + * + * Returns: 0 + */ +static inline int comedi_close(struct comedi_device *dev) +{ + return comedi_close_from(dev, -1); +} + int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev, unsigned int chan, unsigned int *io); int comedi_dio_config(struct comedi_device *dev, unsigned int subdev, diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ab181d87d71d..ff71bebe56f5 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -273,12 +273,6 @@ static inline void *offset_to_ptr(const int *off) #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_64BIT -#define ARCH_SEL(a,b) a -#else -#define ARCH_SEL(a,b) b -#endif - /* * Force the compiler to emit 'sym' as a symbol, so that we can reference * it from inline assembler. Necessary in case 'sym' could be inlined diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 6de59ce8ef8c..2b48be97fcd0 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -251,15 +251,11 @@ struct coresight_trace_id_map { * by @coresight_ops. * @access: Device i/o access abstraction for this device. * @dev: The device entity associated to this component. - * @mode: This tracer's mode, i.e sysFS, Perf or disabled. This is - * actually an 'enum cs_mode', but is stored in an atomic type. - * This is always accessed through local_read() and local_set(), - * but wherever it's done from within the Coresight device's lock, - * a non-atomic read would also work. This is the main point of - * synchronisation between code happening inside the sysfs mode's - * coresight_mutex and outside when running in Perf mode. A compare - * and exchange swap is done to atomically claim one mode or the - * other. + * @mode: The device mode, i.e sysFS, Perf or disabled. This is actually + * an 'enum cs_mode' but stored in an atomic type. Access is always + * through atomic APIs, ensuring SMP-safe synchronisation between + * racing from sysFS and Perf mode. A compare-and-exchange + * operation is done to atomically claim one mode or the other. * @refcnt: keep track of what is in use. Only access this outside of the * device's spinlock when the coresight_mutex held and mode == * CS_MODE_SYSFS. Otherwise it must be accessed from inside the @@ -288,7 +284,7 @@ struct coresight_device { const struct coresight_ops *ops; struct csdev_access access; struct device dev; - local_t mode; + atomic_t mode; int refcnt; bool orphan; /* sink specific fields */ @@ -332,12 +328,14 @@ static struct coresight_dev_list (var) = { \ /** * struct coresight_path - data needed by enable/disable path - * @path_list: path from source to sink. - * @trace_id: trace_id of the whole path. + * @path_list: path from source to sink. + * @trace_id: trace_id of the whole path. + * @handle: handle of the aux_event. 
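The comedilib.h hunk turns comedi_open()/comedi_close() into inline wrappers around comedi_open_from()/comedi_close_from(), so in-kernel callers keep using them as before. A minimal sketch; the device path, subdevice and channel numbers are arbitrary:

#include <linux/comedi.h>
#include <linux/comedi/comedilib.h>

static int toggle_dio_line(void)
{
	struct comedi_device *dev;
	int ret;

	dev = comedi_open("/dev/comedi0");
	if (!dev)
		return -ENODEV;

	/* configure channel 0 of subdevice 2 as an output */
	ret = comedi_dio_config(dev, 2, 0, COMEDI_OUTPUT);

	comedi_close(dev);
	return ret;
}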
*/ struct coresight_path { - struct list_head path_list; - u8 trace_id; + struct list_head path_list; + u8 trace_id; + struct perf_output_handle *handle; }; enum cs_mode { @@ -365,7 +363,7 @@ enum cs_mode { */ struct coresight_ops_sink { int (*enable)(struct coresight_device *csdev, enum cs_mode mode, - void *data); + struct coresight_path *path); int (*disable)(struct coresight_device *csdev); void *(*alloc_buffer)(struct coresight_device *csdev, struct perf_event *event, void **pages, @@ -422,8 +420,9 @@ struct coresight_ops_source { */ struct coresight_ops_helper { int (*enable)(struct coresight_device *csdev, enum cs_mode mode, - void *data); - int (*disable)(struct coresight_device *csdev, void *data); + struct coresight_path *path); + int (*disable)(struct coresight_device *csdev, + struct coresight_path *path); }; @@ -621,13 +620,14 @@ static inline bool coresight_is_percpu_sink(struct coresight_device *csdev) static inline bool coresight_take_mode(struct coresight_device *csdev, enum cs_mode new_mode) { - return local_cmpxchg(&csdev->mode, CS_MODE_DISABLED, new_mode) == - CS_MODE_DISABLED; + int curr = CS_MODE_DISABLED; + + return atomic_try_cmpxchg_acquire(&csdev->mode, &curr, new_mode); } static inline enum cs_mode coresight_get_mode(struct coresight_device *csdev) { - return local_read(&csdev->mode); + return atomic_read_acquire(&csdev->mode); } static inline void coresight_set_mode(struct coresight_device *csdev, @@ -643,7 +643,7 @@ static inline void coresight_set_mode(struct coresight_device *csdev, WARN(new_mode != CS_MODE_DISABLED && current_mode != CS_MODE_DISABLED && current_mode != new_mode, "Device already in use\n"); - local_set(&csdev->mode, new_mode); + atomic_set_release(&csdev->mode, new_mode); } struct coresight_device *coresight_register(struct coresight_desc *desc); diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index afedfd5bea07..80211900f373 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -7,14 +7,16 @@ * set of CPUs in a system, one bit position per CPU number. In general, * only nr_cpu_ids (<= NR_CPUS) bits are valid. 
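The coresight.h change above replaces the local_t mode with an atomic_t and documents that a mode is claimed with a compare-and-exchange. A sketch of the claim/release pattern this supports, with the component programming elided:

#include <linux/coresight.h>

static int example_enable_sysfs(struct coresight_device *csdev)
{
	/* Atomically move DISABLED -> SYSFS; fails if Perf already owns it. */
	if (!coresight_take_mode(csdev, CS_MODE_SYSFS))
		return -EBUSY;

	/* ... program the component ... */
	return 0;
}

static void example_disable(struct coresight_device *csdev)
{
	/* ... stop the component ... */
	coresight_set_mode(csdev, CS_MODE_DISABLED);
}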
*/ -#include <linux/cleanup.h> -#include <linux/kernel.h> +#include <linux/atomic.h> #include <linux/bitmap.h> +#include <linux/cleanup.h> #include <linux/cpumask_types.h> -#include <linux/atomic.h> -#include <linux/bug.h> #include <linux/gfp_types.h> #include <linux/numa.h> +#include <linux/threads.h> +#include <linux/types.h> + +#include <asm/bug.h> /** * cpumask_pr_args - printf args to output a cpumask diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h index 7b44b41d0a20..f0dc03d94ca2 100644 --- a/include/linux/crash_reserve.h +++ b/include/linux/crash_reserve.h @@ -32,6 +32,12 @@ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, void __init reserve_crashkernel_cma(unsigned long long cma_size); #ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +#ifndef arch_add_crash_res_to_iomem +static inline bool arch_add_crash_res_to_iomem(void) +{ + return true; +} +#endif #ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE #define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20) #endif diff --git a/include/linux/device.h b/include/linux/device.h index b031ff71a5bd..0be95294b6e6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -281,25 +281,6 @@ int __must_check device_create_bin_file(struct device *dev, void device_remove_bin_file(struct device *dev, const struct bin_attribute *attr); -/** - * devm_alloc_percpu - Resource-managed alloc_percpu - * @dev: Device to allocate per-cpu memory for - * @type: Type to allocate per-cpu memory for - * - * Managed alloc_percpu. Per-cpu memory allocated with this function is - * automatically freed on driver detach. - * - * RETURNS: - * Pointer to allocated memory on success, NULL on failure. - */ -#define devm_alloc_percpu(dev, type) \ - ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ - __alignof__(type))) - -void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, - size_t align); -void devm_free_percpu(struct device *dev, void __percpu *pdata); - struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index f5a56efd2bd6..99b1002b3e31 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -150,6 +150,9 @@ int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data, device_iter_t fn); struct device *bus_find_device(const struct bus_type *bus, struct device *start, const void *data, device_match_t match); +struct device *bus_find_device_reverse(const struct bus_type *bus, + struct device *start, const void *data, + device_match_t match); /** * bus_find_device_by_name - device iterator for locating a particular device * of a specific name. diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h index 8c5f57e0d613..9c1e3d643d69 100644 --- a/include/linux/device/devres.h +++ b/include/linux/device/devres.h @@ -9,6 +9,7 @@ #include <linux/stdarg.h> #include <linux/types.h> #include <asm/bug.h> +#include <asm/percpu.h> struct device; struct device_node; @@ -96,6 +97,22 @@ devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); char * __printf(3, 4) __malloc devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); +/** + * devm_alloc_percpu - Resource-managed alloc_percpu + * @dev: Device to allocate per-cpu memory for + * @type: Type to allocate per-cpu memory for + * + * Managed alloc_percpu. Per-cpu memory allocated with this function is + * automatically freed on driver detach. 
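device/bus.h above gains bus_find_device_reverse(), with the same signature as bus_find_device() but iterating the bus in reverse order. Assuming the usual device_match_* helpers, usage mirrors the forward variant:

#include <linux/device/bus.h>
#include <linux/platform_device.h>

/* Find the most recently registered platform device with a given name
 * (illustrative: relies on reverse iteration visiting newer devices first).
 * The caller must put_device() the returned device. */
static struct device *find_newest_by_name(const char *name)
{
	return bus_find_device_reverse(&platform_bus_type, NULL, name,
				       device_match_name);
}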
+ * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +#define devm_alloc_percpu(dev, type) \ + ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), __alignof__(type))) + +void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align); + unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); void devm_free_pages(struct device *dev, unsigned long addr); diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 10882d00cb17..4809204c674c 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -31,10 +31,10 @@ struct dma_map_ops { void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); - dma_addr_t (*map_page)(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, unsigned long attrs); - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, + dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_phys)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, unsigned long attrs); /* @@ -46,12 +46,6 @@ struct dma_map_ops { enum dma_data_direction dir, unsigned long attrs); void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); - dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs); - void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir, - unsigned long attrs); void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); void (*sync_single_for_device)(struct device *dev, diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index ff44ec346162..05743900a116 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -38,11 +38,12 @@ struct _ddebug { #define _DPRINTK_FLAGS_INCL_LINENO (1<<3) #define _DPRINTK_FLAGS_INCL_TID (1<<4) #define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5) +#define _DPRINTK_FLAGS_INCL_STACK (1<<6) #define _DPRINTK_FLAGS_INCL_ANY \ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\ - _DPRINTK_FLAGS_INCL_SOURCENAME) + _DPRINTK_FLAGS_INCL_SOURCENAME | _DPRINTK_FLAGS_INCL_STACK) #if defined DEBUG #define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT @@ -160,6 +161,12 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, const struct ib_device *ibdev, const char *fmt, ...); +#define __dynamic_dump_stack(desc) \ +{ \ + if (desc.flags & _DPRINTK_FLAGS_INCL_STACK) \ + dump_stack(); \ +} + #define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \ static struct _ddebug __aligned(8) \ __section("__dyndbg") name = { \ @@ -220,8 +227,10 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, */ #define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \ - if (DYNAMIC_DEBUG_BRANCH(id)) \ + if (DYNAMIC_DEBUG_BRANCH(id)) { \ func(&id, ##__VA_ARGS__); \ + __dynamic_dump_stack(id); \ + } \ } while (0) #define __dynamic_func_call(id, fmt, func, ...) \ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \ @@ -229,8 +238,10 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, #define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) 
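devm_alloc_percpu() is moved from device.h into device/devres.h above; its use is unchanged. A brief sketch of the managed per-CPU allocation in a probe path, with an illustrative stats structure:

#include <linux/device/devres.h>
#include <linux/percpu.h>

struct foo_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static int foo_probe_stats(struct device *dev, struct foo_stats __percpu **stats)
{
	/* Freed automatically on driver detach; no explicit free_percpu(). */
	*stats = devm_alloc_percpu(dev, struct foo_stats);
	if (!*stats)
		return -ENOMEM;

	return 0;
}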
do { \ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \ - if (DYNAMIC_DEBUG_BRANCH(id)) \ + if (DYNAMIC_DEBUG_BRANCH(id)) { \ func(__VA_ARGS__); \ + __dynamic_dump_stack(id); \ + } \ } while (0) #define __dynamic_func_call_no_desc(id, fmt, func, ...) \ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \ diff --git a/include/linux/eisa.h b/include/linux/eisa.h index 21a2ecc1e538..cf55630b595b 100644 --- a/include/linux/eisa.h +++ b/include/linux/eisa.h @@ -68,7 +68,7 @@ struct eisa_driver { /* These external functions are only available when EISA support is enabled. */ #ifdef CONFIG_EISA -extern struct bus_type eisa_bus_type; +extern const struct bus_type eisa_bus_type; int eisa_driver_register (struct eisa_driver *edrv); void eisa_driver_unregister (struct eisa_driver *edrv); diff --git a/include/linux/err.h b/include/linux/err.h index 1d60aa86db53..8c37be0620ab 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -41,6 +41,14 @@ static inline void * __must_check ERR_PTR(long error) return (void *) error; } +/** + * INIT_ERR_PTR - Init a const error pointer. + * @error: A negative error code. + * + * Like ERR_PTR(), but usable to initialize static variables. + */ +#define INIT_ERR_PTR(error) ((void *)(error)) + /* Return the pointer in the percpu address space. */ #define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error)) diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index ee80ca4bb0d0..935dba3633b5 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2025, Altera Corporation */ #ifndef __STRATIX10_SMC_H @@ -47,6 +48,10 @@ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ ARM_SMCCC_OWNER_SIP, (func_num)) +#define INTEL_SIP_SMC_ASYNC_VAL(func_name) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, (func_name)) + /** * Return values in INTEL_SIP_SMC_* call * @@ -620,4 +625,110 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA) +/** + * Request INTEL_SIP_SMC_HWMON_READTEMP + * Sync call to request temperature + * + * Call register usage: + * a0 Temperature Channel + * a1-a7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Temperature Value + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_HWMON_READTEMP 32 +#define INTEL_SIP_SMC_HWMON_READTEMP \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READTEMP) + +/** + * Request INTEL_SIP_SMC_HWMON_READVOLT + * Sync call to request voltage + * + * Call register usage: + * a0 Voltage Channel + * a1-a7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Voltage Value + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_HWMON_READVOLT 33 +#define INTEL_SIP_SMC_HWMON_READVOLT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READVOLT) + +/** + * Request INTEL_SIP_SMC_ASYNC_POLL + * Async call used by service driver at EL1 to query mailbox response from SDM. 
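err.h above adds INIT_ERR_PTR() so that static data can start out holding an error pointer, which the inline ERR_PTR() cannot do. A small example of the pattern it enables; the provider/consumer split is illustrative:

#include <linux/err.h>
#include <linux/errno.h>

/* Not yet probed: consumers see -EPROBE_DEFER until the pointer is published. */
static void *example_provider = INIT_ERR_PTR(-EPROBE_DEFER);

static int example_consumer(void)
{
	if (IS_ERR(example_provider))
		return PTR_ERR(example_provider);

	/* ... use example_provider ... */
	return 0;
}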
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_POLL + * a1 transaction job id + * a2-17 will be used to return the response data + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1-17 will contain the response values from mailbox for the previous send + * transaction + * Or + * a0 INTEL_SIP_SMC_STATUS_NO_RESPONSE + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL (0xC8) +#define INTEL_SIP_SMC_ASYNC_POLL \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL) + +/** + * Request INTEL_SIP_SMC_ASYNC_RSU_GET_SPT + * Async call to get RSU SPT from SDM. + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_SPT + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT (0xEA) +#define INTEL_SIP_SMC_ASYNC_RSU_GET_SPT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT) + +/** + * Request INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS + * Async call to get RSU error status from SDM. + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS (0xEB) +#define INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS) + +/** + * Request INTEL_SIP_SMC_ASYNC_RSU_NOTIFY + * Async call to send NOTIFY value to SDM. + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_RSU_NOTIFY + * a1 transaction job id + * a2 notify value + * a3-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY (0xEC) +#define INTEL_SIP_SMC_ASYNC_RSU_NOTIFY \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY) #endif diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 60ed82112680..d290060f4c73 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2025, Altera Corporation */ #ifndef __STRATIX10_SVC_CLIENT_H @@ -11,10 +12,12 @@ * * fpga: for FPGA configuration * rsu: for remote status update + * hwmon: for hardware monitoring (voltage and temperature) */ #define SVC_CLIENT_FPGA "fpga" #define SVC_CLIENT_RSU "rsu" #define SVC_CLIENT_FCS "fcs" +#define SVC_CLIENT_HWMON "hwmon" /* * Status of the sent command, in bit number @@ -70,6 +73,7 @@ #define SVC_RSU_REQUEST_TIMEOUT_MS 300 #define SVC_FCS_REQUEST_TIMEOUT_MS 2000 #define SVC_COMPLETED_TIMEOUT_MS 30000 +#define SVC_HWMON_REQUEST_TIMEOUT_MS 300 struct stratix10_svc_chan; @@ -124,6 +128,9 @@ struct stratix10_svc_chan; * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status * return status is SVC_STATUS_OK or SVC_STATUS_ERROR * + * @COMMAND_RSU_GET_SPT_TABLE: query firmware for SPT table + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware, * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM * @@ -141,6 +148,12 @@ struct 
stratix10_svc_chan; * * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status * is SVC_STATUS_OK, SVC_STATUS_ERROR + * + * @COMMAND_HWMON_READTEMP: query the temperature from the hardware monitor, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_HWMON_READVOLT: query the voltage from the hardware monitor, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR */ enum stratix10_svc_command_code { /* for FPGA */ @@ -158,6 +171,7 @@ enum stratix10_svc_command_code { COMMAND_RSU_DCMF_VERSION, COMMAND_RSU_DCMF_STATUS, COMMAND_FIRMWARE_VERSION, + COMMAND_RSU_GET_SPT_TABLE, /* for FCS */ COMMAND_FCS_REQUEST_SERVICE = 20, COMMAND_FCS_SEND_CERTIFICATE, @@ -171,6 +185,9 @@ enum stratix10_svc_command_code { COMMAND_MBOX_SEND_CMD = 100, /* Non-mailbox SMC Call */ COMMAND_SMC_SVC_VERSION = 200, + /* for HWMON */ + COMMAND_HWMON_READTEMP, + COMMAND_HWMON_READVOLT }; /** @@ -284,5 +301,92 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg); * request process. */ void stratix10_svc_done(struct stratix10_svc_chan *chan); + +/** + * typedef async_callback_t - A type definition for an asynchronous callback function. + * + * This type defines a function pointer for an asynchronous callback. + * The callback function takes a single argument, which is a pointer to + * user-defined data. + * + * @cb_arg: Argument to be passed to the callback function. + */ +typedef void (*async_callback_t)(void *cb_arg); + +/** + * stratix10_svc_add_async_client - Add an asynchronous client to a Stratix 10 + * service channel. + * @chan: Pointer to the Stratix 10 service channel structure. + * @use_unique_clientid: Boolean flag indicating whether to use a unique client ID. + * + * This function registers an asynchronous client with the specified Stratix 10 + * service channel. If the use_unique_clientid flag is set to true, a unique client + * ID will be assigned to the client. + * + * Return: 0 on success, or a negative error code on failure: + * -EINVAL if the channel is NULL or the async controller is not initialized. + * -EALREADY if the async channel is already allocated. + * -ENOMEM if memory allocation fails. + * Other negative values if ID allocation fails + */ +int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, bool use_unique_clientid); + +/** + * stratix10_svc_remove_async_client - Remove an asynchronous client from the Stratix 10 + * service channel. + * @chan: Pointer to the Stratix 10 service channel structure. + * + * This function removes an asynchronous client from the specified Stratix 10 service channel. + * It is typically used to clean up and release resources associated with the client. + * + * Return: 0 on success, -EINVAL if the channel or asynchronous channel is invalid. + */ +int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan); + +/** + * stratix10_svc_async_send - Send an asynchronous message to the SDM mailbox + * in EL3 secure firmware. + * @chan: Pointer to the service channel structure. + * @msg: Pointer to the message to be sent. + * @handler: Pointer to the handler object used by caller to track the transaction. + * @cb: Callback function to be called upon completion. + * @cb_arg: Argument to be passed to the callback function. + * + * This function sends a message asynchronously to the SDM mailbox in EL3 secure firmware. + * and registers a callback function to be invoked when the operation completes. + * + * Return: 0 on success,and negative error codes on failure. 
+ */ +int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, void **handler, + async_callback_t cb, void *cb_arg); + +/** + * stratix10_svc_async_poll - Polls the status of an asynchronous service request. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being polled. + * @data: Pointer to the callback data structure to be filled with the result. + * + * This function checks the status of an asynchronous service request + * and fills the provided callback data structure with the result. + * + * Return: 0 on success, -EINVAL if any input parameter is invalid or if the + * async controller is not initialized, -EAGAIN if the transaction is + * still in progress, or other negative error codes on failure. + */ +int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, void *tx_handle, + struct stratix10_svc_cb_data *data); + +/** + * stratix10_svc_async_done - Complete an asynchronous transaction + * @chan: Pointer to the service channel structure + * @tx_handle: Pointer to the transaction handle + * + * This function completes an asynchronous transaction by removing the + * transaction from the hash table and deallocating the associated resources. + * + * Return: 0 on success, -EINVAL on invalid input or errors. + */ +int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle); + #endif diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h index 48ac0e5454c7..23173e0c3ddd 100644 --- a/include/linux/firmware/qcom/qcom_tzmem.h +++ b/include/linux/firmware/qcom/qcom_tzmem.h @@ -17,11 +17,20 @@ struct qcom_tzmem_pool; * enum qcom_tzmem_policy - Policy for pool growth. */ enum qcom_tzmem_policy { - /**< Static pool, never grow above initial size. */ + /** + * @QCOM_TZMEM_POLICY_STATIC: Static pool, + * never grow above initial size. + */ QCOM_TZMEM_POLICY_STATIC = 1, - /**< When out of memory, add increment * current size of memory. */ + /** + * @QCOM_TZMEM_POLICY_MULTIPLIER: When out of memory, + * add increment * current size of memory. + */ QCOM_TZMEM_POLICY_MULTIPLIER, - /**< When out of memory add as much as is needed until max_size. */ + /** + * @QCOM_TZMEM_POLICY_ON_DEMAND: When out of memory + * add as much as is needed until max_size. + */ QCOM_TZMEM_POLICY_ON_DEMAND, }; diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h new file mode 100644 index 000000000000..d3538dd5822a --- /dev/null +++ b/include/linux/firmware/xlnx-zynqmp-ufs.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Firmware layer for UFS APIs. + * + * Copyright (c) 2025 Advanced Micro Devices, Inc. 
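The stratix10-svc-client.h additions above document an asynchronous send/poll/done flow. A sketch of how a client might drive it, based only on the kernel-doc in this hunk; the message contents, the completion used for signalling, and the channel setup are assumptions:

#include <linux/completion.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

struct async_ctx {
	struct completion done;
};

static void example_async_cb(void *cb_arg)
{
	struct async_ctx *ctx = cb_arg;

	complete(&ctx->done);
}

static int example_async_transaction(struct stratix10_svc_chan *chan, void *msg)
{
	struct stratix10_svc_cb_data data;
	struct async_ctx ctx;
	void *handle;
	int ret;

	init_completion(&ctx.done);

	ret = stratix10_svc_add_async_client(chan, false);
	if (ret)
		return ret;

	ret = stratix10_svc_async_send(chan, msg, &handle, example_async_cb, &ctx);
	if (ret)
		goto out;

	wait_for_completion(&ctx.done);

	/* -EAGAIN means the mailbox response is not ready yet. */
	do {
		ret = stratix10_svc_async_poll(chan, handle, &data);
	} while (ret == -EAGAIN);

	stratix10_svc_async_done(chan, handle);
out:
	stratix10_svc_remove_async_client(chan);
	return ret;
}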
+ */ + +#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__ +#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__ + +#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) +int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready); +int zynqmp_pm_is_sram_init_done(bool *is_done); +int zynqmp_pm_set_sram_bypass(void); +int zynqmp_pm_get_ufs_calibration_values(u32 *val); +#else +static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_is_sram_init_done(bool *is_done) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_sram_bypass(void) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val) +{ + return -ENODEV; +} +#endif + +#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index ae48d619c4e0..15fdbd089bbf 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -3,7 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2021 Xilinx - * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. * * Michal Simek <michal.simek@amd.com> * Davorin Mista <davorin.mista@aggios.com> @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/err.h> +#include <linux/firmware/xlnx-zynqmp-ufs.h> #define ZYNQMP_PM_VERSION_MAJOR 1 #define ZYNQMP_PM_VERSION_MINOR 0 @@ -51,16 +52,10 @@ #define PM_PINCTRL_PARAM_SET_VERSION 2 -#define ZYNQMP_FAMILY_CODE 0x23 -#define VERSAL_FAMILY_CODE 0x26 - -/* When all subfamily of platform need to support */ -#define ALL_SUB_FAMILY_CODE 0x00 -#define VERSAL_SUB_FAMILY_CODE 0x01 -#define VERSALNET_SUB_FAMILY_CODE 0x03 - -#define FAMILY_CODE_MASK GENMASK(27, 21) -#define SUB_FAMILY_CODE_MASK GENMASK(20, 19) +/* Family codes */ +#define PM_ZYNQMP_FAMILY_CODE 0x1 /* ZynqMP family code */ +#define PM_VERSAL_FAMILY_CODE 0x2 /* Versal family code */ +#define PM_VERSAL_NET_FAMILY_CODE 0x3 /* Versal NET family code */ #define API_ID_MASK GENMASK(7, 0) #define MODULE_ID_MASK GENMASK(11, 8) @@ -164,6 +159,7 @@ enum pm_api_cb_id { enum pm_api_id { PM_API_FEATURES = 0, PM_GET_API_VERSION = 1, + PM_GET_NODE_STATUS = 3, PM_REGISTER_NOTIFIER = 5, PM_FORCE_POWERDOWN = 8, PM_REQUEST_WAKEUP = 10, @@ -241,6 +237,7 @@ enum pm_ioctl_id { IOCTL_GET_FEATURE_CONFIG = 27, /* IOCTL for Secure Read/Write Interface */ IOCTL_READ_REG = 28, + IOCTL_MASK_WRITE_REG = 29, /* Dynamic SD/GEM configuration */ IOCTL_SET_SD_CONFIG = 30, IOCTL_SET_GEM_CONFIG = 31, @@ -564,7 +561,7 @@ int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) int zynqmp_pm_get_api_version(u32 *version); int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); -int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily); +int zynqmp_pm_get_family_info(u32 *family); int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); int zynqmp_pm_clock_enable(u32 clock_id); int zynqmp_pm_clock_disable(u32 clock_id); @@ -619,6 +616,9 @@ int zynqmp_pm_feature(const u32 api_id); int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); +int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value); +int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, + u32 mask, u32 value); int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); int 
zynqmp_pm_force_pwrdwn(const u32 target, const enum zynqmp_pm_request_ack ack); @@ -629,6 +629,8 @@ int zynqmp_pm_request_wake(const u32 node, int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode); int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode); int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode); +int zynqmp_pm_get_node_status(const u32 node, u32 *const status, + u32 *const requirements, u32 *const usage); int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value); int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, u32 value); @@ -643,7 +645,7 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) return -ENODEV; } -static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) +static inline int zynqmp_pm_get_family_info(u32 *family) { return -ENODEV; } @@ -916,6 +918,17 @@ static inline int zynqmp_pm_request_wake(const u32 node, return -ENODEV; } +static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, + u32 mask, u32 value) +{ + return -ENODEV; +} + static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) { return -ENODEV; @@ -931,6 +944,13 @@ static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mo return -ENODEV; } +static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status, + u32 *const requirements, + u32 *const usage) +{ + return -ENODEV; +} + static inline int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value) diff --git a/include/linux/font.h b/include/linux/font.h index 81caffd51bb4..fd8625cd76b2 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -35,6 +35,7 @@ struct font_desc { #define FONT6x10_IDX 10 #define TER16x32_IDX 11 #define FONT6x8_IDX 12 +#define TER10x18_IDX 13 extern const struct font_desc font_vga_8x8, font_vga_8x16, @@ -48,7 +49,8 @@ extern const struct font_desc font_vga_8x8, font_mini_4x6, font_6x10, font_ter_16x32, - font_6x8; + font_6x8, + font_ter_10x18; /* Find a font with a specific name */ diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 5a0e897cae80..5e8a3b546033 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -120,6 +120,8 @@ static inline bool fs_validate_description(const char *name, #define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL) #define fsparam_u32oct(NAME, OPT) \ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8) +#define fsparam_u32hex(NAME, OPT) \ + __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)16) #define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL) #define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL) #define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array) diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h index aa21b032e861..3bf4c49726a7 100644 --- a/include/linux/iio/adc/qcom-vadc-common.h +++ b/include/linux/iio/adc/qcom-vadc-common.h @@ -83,27 +83,27 @@ struct vadc_linear_graph { /** * enum vadc_scale_fn_type - Scaling function to convert ADC code to * physical scaled units for the channel. - * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV). - * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC. 
+ * @SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV). + * @SCALE_THERM_100K_PULLUP: Returns temperature in millidegC. * Uses a mapping table with 100K pullup. - * SCALE_PMIC_THERM: Returns result in milli degree's Centigrade. - * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC. - * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp - * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to + * @SCALE_PMIC_THERM: Returns result in milli degree's Centigrade. + * @SCALE_XOTHERM: Returns XO thermistor voltage in millidegC. + * @SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp + * @SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to * voltage (uV) with hardware applied offset/slope values to adc code. - * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using + * @SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using * lookup table. The hardware applies offset/slope to adc code. - * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using + * @SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using * 100k pullup. The hardware applies offset/slope to adc code. - * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using + * @SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using * lookup table for PMIC7. The hardware applies offset/slope to adc code. - * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade. + * @SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade. * The hardware applies offset/slope to adc code. - * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade. + * @SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade. * The hardware applies offset/slope to adc code. This is for PMIC7. - * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5 + * @SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5 * charger temperature. - * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5 + * @SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5 * SMB1390 temperature. */ enum vadc_scale_fn_type { @@ -120,6 +120,7 @@ enum vadc_scale_fn_type { SCALE_HW_CALIB_PMIC_THERM_PM7, SCALE_HW_CALIB_PM5_CHG_TEMP, SCALE_HW_CALIB_PM5_SMB_TEMP, + /* private: */ SCALE_HW_CALIB_INVALID, }; diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 5c84ec4a9810..d37f82678f71 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -26,11 +26,7 @@ int iio_pop_from_buffer(struct iio_buffer *buffer, void *data); * @data: sample data * @timestamp: timestamp for the sample data * - * Pushes data to the IIO device's buffers. If timestamps are enabled for the - * device the function will store the supplied timestamp as the last element in - * the sample data buffer before pushing it to the device buffers. The sample - * data buffer needs to be large enough to hold the additional timestamp - * (usually the buffer should be indio->scan_bytes bytes large). + * DEPRECATED: Use iio_push_to_buffers_with_ts() instead. * * Returns 0 on success, a negative error code otherwise. */ @@ -45,6 +41,22 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, return iio_push_to_buffers(indio_dev, data); } +/** + * iio_push_to_buffers_with_ts() - push data and timestamp to buffers + * @indio_dev: iio_dev structure for device. + * @data: Pointer to sample data buffer. 
+ * @data_total_len: The size of @data in bytes. + * @timestamp: Timestamp for the sample data. + * + * Pushes data to the IIO device's buffers. If timestamps are enabled for the + * device the function will store the supplied timestamp as the last element in + * the sample data buffer before pushing it to the device buffers. The sample + * data buffer needs to be large enough to hold the additional timestamp + * (usually the buffer should be at least indio->scan_bytes bytes large). + * + * Context: Any context. + * Return: 0 on success, a negative error code otherwise. + */ static inline int iio_push_to_buffers_with_ts(struct iio_dev *indio_dev, void *data, size_t data_total_len, s64 timestamp) diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index 8d770ced66b2..c0b0e0992a85 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -24,7 +24,8 @@ struct sg_table; /** * struct iio_buffer_access_funcs - access functions for buffers. - * @store_to: actually store stuff to the buffer + * @store_to: actually store stuff to the buffer - must be safe to + * call from any context (e.g. must not sleep). * @read: try to get a specified number of bytes (must exist) * @data_available: indicates how much data is available for reading from * the buffer. diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index a38b277c2c02..5039558267e4 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -131,7 +131,8 @@ struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture * @dev: Pointer to client device. - * @cb: Callback function. + * @cb: Callback function. Must be safe to call from any context + * (e.g. must not sleep). * @private: Private data passed to callback. * * NB right now we have no ability to mux data from multiple devices. diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index aa160511e265..bfb6df68e6c9 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -57,6 +57,7 @@ struct adis_timeout { * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin * @has_paging: True if ADIS device has paged registers + * @has_fifo: True if ADIS device has a hardware FIFO * @burst_reg_cmd: Register command that triggers burst * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined, * this should be the minimum size supported by the device. 
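The new kernel-doc above for iio_push_to_buffers_with_ts() stresses that the sample buffer must be large enough for the appended timestamp. A typical trigger-handler sketch; the two 16-bit channels are illustrative:

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/irqreturn.h>

struct example_scan {
	s16 channels[2];
	s64 timestamp __aligned(8);	/* room for the appended timestamp */
};

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct example_scan scan = { };

	/* ... read scan.channels from the device ... */

	iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
				    iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}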
@@ -136,7 +137,7 @@ struct adis { const struct adis_data *data; unsigned int burst_extra_len; const struct adis_ops *ops; - /** + /* * The state_lock is meant to be used during operations that require * a sequence of SPI R/W in order to protect the SPI transfer * information (fields 'xfer', 'msg' & 'current_page') between @@ -166,7 +167,7 @@ int __adis_reset(struct adis *adis); * adis_reset() - Reset the device * @adis: The adis device * - * Returns 0 on success, a negative error code otherwise + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_reset(struct adis *adis) { @@ -183,7 +184,9 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, * __adis_write_reg_8() - Write single byte to a register (unlocked) * @adis: The adis device * @reg: The address of the register to be written - * @value: The value to write + * @val: The value to write + * + * Returns: %0 on success, a negative error code otherwise */ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, u8 val) @@ -195,7 +198,9 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, * __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked) * @adis: The adis device * @reg: The address of the lower of the two registers - * @value: Value to be written + * @val: Value to be written + * + * Returns: %0 on success, a negative error code otherwise */ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, u16 val) @@ -207,7 +212,9 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, * __adis_write_reg_32() - write 4 bytes to four registers (unlocked) * @adis: The adis device * @reg: The address of the lower of the four register - * @value: Value to be written + * @val: Value to be written + * + * Returns: %0 on success, a negative error code otherwise */ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, u32 val) @@ -220,6 +227,8 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, * @adis: The adis device * @reg: The address of the lower of the two registers * @val: The value read back from the device + * + * Returns: %0 on success, a negative error code otherwise */ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, u16 *val) @@ -239,6 +248,8 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, * @adis: The adis device * @reg: The address of the lower of the two registers * @val: The value read back from the device + * + * Returns: %0 on success, a negative error code otherwise */ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, u32 *val) @@ -257,8 +268,10 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, * adis_write_reg() - write N bytes to register * @adis: The adis device * @reg: The address of the lower of the two registers - * @value: The value to write to device (up to 4 bytes) + * @val: The value to write to device (up to 4 bytes) * @size: The size of the @value (in bytes) + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_write_reg(struct adis *adis, unsigned int reg, unsigned int val, unsigned int size) @@ -273,6 +286,8 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg, * @reg: The address of the lower of the two registers * @val: The value read back from the device * @size: The size of the @val buffer + * + * Returns: %0 on success, a negative error code otherwise */ static int 
adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, unsigned int size) @@ -285,7 +300,9 @@ static int adis_read_reg(struct adis *adis, unsigned int reg, * adis_write_reg_8() - Write single byte to a register * @adis: The adis device * @reg: The address of the register to be written - * @value: The value to write + * @val: The value to write + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, u8 val) @@ -297,7 +314,9 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, * adis_write_reg_16() - Write 2 bytes to a pair of registers * @adis: The adis device * @reg: The address of the lower of the two registers - * @value: Value to be written + * @val: Value to be written + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, u16 val) @@ -309,7 +328,9 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, * adis_write_reg_32() - write 4 bytes to four registers * @adis: The adis device * @reg: The address of the lower of the four register - * @value: Value to be written + * @val: Value to be written + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, u32 val) @@ -322,6 +343,8 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, * @adis: The adis device * @reg: The address of the lower of the two registers * @val: The value read back from the device + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, u16 *val) @@ -341,6 +364,8 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, * @adis: The adis device * @reg: The address of the lower of the two registers * @val: The value read back from the device + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, u32 *val) @@ -366,6 +391,8 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, * @size: Size of the register to update * * Updates the desired bits of @reg in accordance with @mask and @val. + * + * Returns: %0 on success, a negative error code otherwise */ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, const u32 val, u8 size) diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index e4b8808823ad..4b12821528a6 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -16,7 +16,7 @@ #define MBps_to_icc(x) ((x) * 1000) #define GBps_to_icc(x) ((x) * 1000 * 1000) #define bps_to_icc(x) (1) -#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0)) +#define kbps_to_icc(x) (((x) + 7) / 8) #define Mbps_to_icc(x) ((x) * 1000 / 8) #define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index e8b2d6aa4013..9afa30f9346f 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -334,6 +334,15 @@ static inline bool resource_union(const struct resource *r1, const struct resour return true; } +/* + * Check if this resource is added to a resource tree or detached. Caller is + * responsible for not racing assignment. 
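The adis.h hunks above mainly correct kernel-doc parameter names and add Return: lines; the locked helpers themselves are unchanged. For reference, a short sketch of a read-modify-write with them; the register address and bit are illustrative:

#include <linux/bits.h>
#include <linux/iio/imu/adis.h>

#define EXAMPLE_FILT_CTRL_REG	0x5c	/* illustrative register address */
#define EXAMPLE_FILT_EN		BIT(2)

static int example_enable_filter(struct adis *adis)
{
	u16 ctrl;
	int ret;

	ret = adis_read_reg_16(adis, EXAMPLE_FILT_CTRL_REG, &ctrl);
	if (ret)
		return ret;

	return adis_write_reg_16(adis, EXAMPLE_FILT_CTRL_REG,
				 ctrl | EXAMPLE_FILT_EN);
}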
+ */ +static inline bool resource_assigned(struct resource *res) +{ + return res->parent; +} + int find_resource_space(struct resource *root, struct resource *new, resource_size_t size, struct resource_constraint *constraint); diff --git a/include/linux/ipack.h b/include/linux/ipack.h index 2c6936b8371f..455f6c2a1903 100644 --- a/include/linux/ipack.h +++ b/include/linux/ipack.h @@ -70,15 +70,13 @@ enum ipack_space { IPACK_SPACE_COUNT, }; -/** - */ struct ipack_region { phys_addr_t start; size_t size; }; /** - * struct ipack_device + * struct ipack_device - subsystem representation of an IPack device * * @slot: Slot where the device is plugged in the carrier board * @bus: ipack_bus_device where the device is plugged to. @@ -89,7 +87,7 @@ struct ipack_region { * * Warning: Direct access to mapped memory is possible but the endianness * is not the same with PCI carrier or VME carrier. The endianness is managed - * by the carrier board throught bus->ops. + * by the carrier board through bus->ops. */ struct ipack_device { unsigned int slot; @@ -124,6 +122,7 @@ struct ipack_driver_ops { * struct ipack_driver -- Specific data to each ipack device driver * * @driver: Device driver kernel representation + * @id_table: Device ID table for this driver * @ops: Callbacks provided by the IPack device driver */ struct ipack_driver { @@ -161,7 +160,7 @@ struct ipack_bus_ops { }; /** - * struct ipack_bus_device + * struct ipack_bus_device - IPack bus representation * * @dev: pointer to carrier device * @slots: number of slots available @@ -185,6 +184,8 @@ struct ipack_bus_device { * * The carrier board device should call this function to register itself as * available bus device in ipack. + * + * Return: %NULL on error or &struct ipack_bus_device on success */ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, const struct ipack_bus_ops *ops, @@ -192,6 +193,8 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, /** * ipack_bus_unregister -- unregister an ipack bus + * + * Return: %0 */ int ipack_bus_unregister(struct ipack_bus_device *bus); @@ -200,6 +203,8 @@ int ipack_bus_unregister(struct ipack_bus_device *bus); * * Called by a ipack driver to register itself as a driver * that can manage ipack devices. + * + * Return: zero on success or error code on failure. */ int ipack_driver_register(struct ipack_driver *edrv, struct module *owner, const char *name); @@ -215,7 +220,7 @@ void ipack_driver_unregister(struct ipack_driver *edrv); * function. The rest of the fields will be allocated and populated * during initalization. * - * Return zero on success or error code on failure. + * Return: zero on success or error code on failure. * * NOTE: _Never_ directly free @dev after calling this function, even * if it returned an error! Always use ipack_put_device() to give up the @@ -230,7 +235,7 @@ int ipack_device_init(struct ipack_device *dev); * Add a new IPack device. The call is done by the carrier driver * after calling ipack_device_init(). * - * Return zero on success or error code on failure. + * Return: zero on success or error code on failure. * * NOTE: _Never_ directly free @dev after calling this function, even * if it returned an error! Always use ipack_put_device() to give up the @@ -266,9 +271,11 @@ void ipack_put_device(struct ipack_device *dev); .device = (dev) /** - * ipack_get_carrier - it increase the carrier ref. counter of + * ipack_get_carrier - try to increase the carrier ref. 
counter of * the carrier module * @dev: mezzanine device which wants to get the carrier + * + * Return: true on success. */ static inline int ipack_get_carrier(struct ipack_device *dev) { diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 2223f95079ce..d45fa19f9e47 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -86,7 +86,13 @@ #define GICH_HCR_EN (1 << 0) #define GICH_HCR_UIE (1 << 1) +#define GICH_HCR_LRENPIE (1 << 2) #define GICH_HCR_NPIE (1 << 3) +#define GICH_HCR_VGrp0EIE (1 << 4) +#define GICH_HCR_VGrp0DIE (1 << 5) +#define GICH_HCR_VGrp1EIE (1 << 6) +#define GICH_HCR_VGrp1DIE (1 << 7) +#define GICH_HCR_EOICOUNT GENMASK(31, 27) #define GICH_LR_VIRTUALID (0x3ff << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h index a470a73a805a..67d9d960273b 100644 --- a/include/linux/irqchip/arm-vgic-info.h +++ b/include/linux/irqchip/arm-vgic-info.h @@ -24,6 +24,8 @@ struct gic_kvm_info { enum gic_type type; /* Virtual CPU interface */ struct resource vcpu; + /* GICv2 GICC VA */ + void __iomem *gicc_base; /* Interrupt number */ unsigned int maint_irq; /* No interrupt mask, no need to use the above field */ diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h index 25042c1d8d54..5f7b9de97e8d 100644 --- a/include/linux/kexec_handover.h +++ b/include/linux/kexec_handover.h @@ -2,22 +2,16 @@ #ifndef LINUX_KEXEC_HANDOVER_H #define LINUX_KEXEC_HANDOVER_H -#include <linux/types.h> +#include <linux/err.h> #include <linux/errno.h> +#include <linux/types.h> struct kho_scratch { phys_addr_t addr; phys_addr_t size; }; -/* KHO Notifier index */ -enum kho_event { - KEXEC_KHO_FINALIZE = 0, - KEXEC_KHO_ABORT = 1, -}; - struct folio; -struct notifier_block; struct page; #define DECLARE_KHOSER_PTR(name, type) \ @@ -37,8 +31,6 @@ struct page; (typeof((s).ptr))((s).phys ? 
phys_to_virt((s).phys) : NULL); \ }) -struct kho_serialization; - struct kho_vmalloc_chunk; struct kho_vmalloc { DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *); @@ -52,17 +44,21 @@ bool kho_is_enabled(void); bool is_kho_boot(void); int kho_preserve_folio(struct folio *folio); +void kho_unpreserve_folio(struct folio *folio); int kho_preserve_pages(struct page *page, unsigned int nr_pages); +void kho_unpreserve_pages(struct page *page, unsigned int nr_pages); int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation); +void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation); +void *kho_alloc_preserve(size_t size); +void kho_unpreserve_free(void *mem); +void kho_restore_free(void *mem); struct folio *kho_restore_folio(phys_addr_t phys); struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages); void *kho_restore_vmalloc(const struct kho_vmalloc *preservation); -int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt); +int kho_add_subtree(const char *name, void *fdt); +void kho_remove_subtree(void *fdt); int kho_retrieve_subtree(const char *name, phys_addr_t *phys); -int register_kho_notifier(struct notifier_block *nb); -int unregister_kho_notifier(struct notifier_block *nb); - void kho_memory_init(void); void kho_populate(phys_addr_t fdt_phys, u64 fdt_len, phys_addr_t scratch_phys, @@ -83,17 +79,31 @@ static inline int kho_preserve_folio(struct folio *folio) return -EOPNOTSUPP; } +static inline void kho_unpreserve_folio(struct folio *folio) { } + static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages) { return -EOPNOTSUPP; } +static inline void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) { } + static inline int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation) { return -EOPNOTSUPP; } +static inline void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) { } + +static inline void *kho_alloc_preserve(size_t size) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void kho_unpreserve_free(void *mem) { } +static inline void kho_restore_free(void *mem) { } + static inline struct folio *kho_restore_folio(phys_addr_t phys) { return NULL; @@ -110,30 +120,19 @@ static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation) return NULL; } -static inline int kho_add_subtree(struct kho_serialization *ser, - const char *name, void *fdt) -{ - return -EOPNOTSUPP; -} - -static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys) +static inline int kho_add_subtree(const char *name, void *fdt) { return -EOPNOTSUPP; } -static inline int register_kho_notifier(struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} +static inline void kho_remove_subtree(void *fdt) { } -static inline int unregister_kho_notifier(struct notifier_block *nb) +static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys) { return -EOPNOTSUPP; } -static inline void kho_memory_init(void) -{ -} +static inline void kho_memory_init(void) { } static inline void kho_populate(phys_addr_t fdt_phys, u64 fdt_len, phys_addr_t scratch_phys, u64 scratch_len) diff --git a/include/linux/kho/abi/luo.h b/include/linux/kho/abi/luo.h new file mode 100644 index 000000000000..bb099c92e469 --- /dev/null +++ b/include/linux/kho/abi/luo.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (c) 2025, Google LLC. 
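As a hedged sketch of how the preservation API declared just above (kho_alloc_preserve(), kho_add_subtree() and friends) might be used by a KHO client; the allocation size, the FDT construction step, and the error handling policy are illustrative assumptions only:

	static int example_kho_save(void *fdt)
	{
		void *state = kho_alloc_preserve(PAGE_SIZE);
		int err;

		if (IS_ERR(state))
			return PTR_ERR(state);

		/* ... fill 'state' and record its physical address in 'fdt' ... */

		err = kho_add_subtree("example", fdt);
		if (err)
			kho_unpreserve_free(state);

		return err;
	}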
+ * Pasha Tatashin <pasha.tatashin@soleen.com> + */ + +/** + * DOC: Live Update Orchestrator ABI + * + * This header defines the stable Application Binary Interface used by the + * Live Update Orchestrator to pass state from a pre-update kernel to a + * post-update kernel. The ABI is built upon the Kexec HandOver framework + * and uses a Flattened Device Tree to describe the preserved data. + * + * This interface is a contract. Any modification to the FDT structure, node + * properties, compatible strings, or the layout of the `__packed` serialization + * structures defined here constitutes a breaking change. Such changes require + * incrementing the version number in the relevant `_COMPATIBLE` string to + * prevent a new kernel from misinterpreting data from an old kernel. + * + * Changes are allowed provided the compatibility version is incremented; + * however, backward/forward compatibility is only guaranteed for kernels + * supporting the same ABI version. + * + * FDT Structure Overview: + * The entire LUO state is encapsulated within a single KHO entry named "LUO". + * This entry contains an FDT with the following layout: + * + * .. code-block:: none + * + * / { + * compatible = "luo-v1"; + * liveupdate-number = <...>; + * + * luo-session { + * compatible = "luo-session-v1"; + * luo-session-header = <phys_addr_of_session_header_ser>; + * }; + * }; + * + * Main LUO Node (/): + * + * - compatible: "luo-v1" + * Identifies the overall LUO ABI version. + * - liveupdate-number: u64 + * A counter tracking the number of successful live updates performed. + * + * Session Node (luo-session): + * This node describes all preserved user-space sessions. + * + * - compatible: "luo-session-v1" + * Identifies the session ABI version. + * - luo-session-header: u64 + * The physical address of a `struct luo_session_header_ser`. This structure + * is the header for a contiguous block of memory containing an array of + * `struct luo_session_ser`, one for each preserved session. + * + * Serialization Structures: + * The FDT properties point to memory regions containing arrays of simple, + * `__packed` structures. These structures contain the actual preserved state. + * + * - struct luo_session_header_ser: + * Header for the session array. Contains the total page count of the + * preserved memory block and the number of `struct luo_session_ser` + * entries that follow. + * + * - struct luo_session_ser: + * Metadata for a single session, including its name and a physical pointer + * to another preserved memory block containing an array of + * `struct luo_file_ser` for all files in that session. + * + * - struct luo_file_ser: + * Metadata for a single preserved file. Contains the `compatible` string to + * find the correct handler in the new kernel, a user-provided `token` for + * identification, and an opaque `data` handle for the handler to use. + */ + +#ifndef _LINUX_KHO_ABI_LUO_H +#define _LINUX_KHO_ABI_LUO_H + +#include <uapi/linux/liveupdate.h> + +/* + * The LUO FDT hooks all LUO state for sessions, fds, etc. + * In the root it also carries "liveupdate-number" 64-bit property that + * corresponds to the number of live-updates performed on this machine. + */ +#define LUO_FDT_SIZE PAGE_SIZE +#define LUO_FDT_KHO_ENTRY_NAME "LUO" +#define LUO_FDT_COMPATIBLE "luo-v1" +#define LUO_FDT_LIVEUPDATE_NUM "liveupdate-number" + +#define LIVEUPDATE_HNDL_COMPAT_LENGTH 48 + +/** + * struct luo_file_ser - Represents the serialized preserves files. + * @compatible: File handler compatible string. 
+ * @data: Private data + * @token: User provided token for this file + * + * If this structure is modified, LUO_SESSION_COMPATIBLE must be updated. + */ +struct luo_file_ser { + char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH]; + u64 data; + u64 token; +} __packed; + +/** + * struct luo_file_set_ser - Represents the serialized metadata for file set + * @files: The physical address of a contiguous memory block that holds + * the serialized state of files (array of luo_file_ser) in this file + * set. + * @count: The total number of files that were part of this session during + * serialization. Used for iteration and validation during + * restoration. + */ +struct luo_file_set_ser { + u64 files; + u64 count; +} __packed; + +/* + * LUO FDT session node + * LUO_FDT_SESSION_HEADER: is a u64 physical address of struct + * luo_session_header_ser + */ +#define LUO_FDT_SESSION_NODE_NAME "luo-session" +#define LUO_FDT_SESSION_COMPATIBLE "luo-session-v2" +#define LUO_FDT_SESSION_HEADER "luo-session-header" + +/** + * struct luo_session_header_ser - Header for the serialized session data block. + * @count: The number of `struct luo_session_ser` entries that immediately + * follow this header in the memory block. + * + * This structure is located at the beginning of a contiguous block of + * physical memory preserved across the kexec. It provides the necessary + * metadata to interpret the array of session entries that follow. + * + * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated. + */ +struct luo_session_header_ser { + u64 count; +} __packed; + +/** + * struct luo_session_ser - Represents the serialized metadata for a LUO session. + * @name: The unique name of the session, provided by the userspace at + * the time of session creation. + * @file_set_ser: Serialized files belonging to this session, + * + * This structure is used to package session-specific metadata for transfer + * between kernels via Kexec Handover. An array of these structures (one per + * session) is created and passed to the new kernel, allowing it to reconstruct + * the session context. + * + * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated. + */ +struct luo_session_ser { + char name[LIVEUPDATE_SESSION_NAME_LENGTH]; + struct luo_file_set_ser file_set_ser; +} __packed; + +#endif /* _LINUX_KHO_ABI_LUO_H */ diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h new file mode 100644 index 000000000000..da7d063474a1 --- /dev/null +++ b/include/linux/kho/abi/memfd.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (c) 2025, Google LLC. + * Pasha Tatashin <pasha.tatashin@soleen.com> + * + * Copyright (C) 2025 Amazon.com Inc. or its affiliates. + * Pratyush Yadav <ptyadav@amazon.de> + */ + +#ifndef _LINUX_KHO_ABI_MEMFD_H +#define _LINUX_KHO_ABI_MEMFD_H + +#include <linux/types.h> +#include <linux/kexec_handover.h> + +/** + * DOC: memfd Live Update ABI + * + * This header defines the ABI for preserving the state of a memfd across a + * kexec reboot using the LUO. + * + * The state is serialized into a packed structure `struct memfd_luo_ser` + * which is handed over to the next kernel via the KHO mechanism. + * + * This interface is a contract. Any modification to the structure layout + * constitutes a breaking change. Such changes require incrementing the + * version number in the MEMFD_LUO_FH_COMPATIBLE string. + */ + +/** + * MEMFD_LUO_FOLIO_DIRTY - The folio is dirty. + * + * This flag indicates the folio contains data from user. 
A non-dirty folio is + * one that was allocated (say using fallocate(2)) but not written to. + */ +#define MEMFD_LUO_FOLIO_DIRTY BIT(0) + +/** + * MEMFD_LUO_FOLIO_UPTODATE - The folio is up-to-date. + * + * An up-to-date folio has been zeroed out. shmem zeroes out folios on first + * use. This flag tracks which folios need zeroing. + */ +#define MEMFD_LUO_FOLIO_UPTODATE BIT(1) + +/** + * struct memfd_luo_folio_ser - Serialized state of a single folio. + * @pfn: The page frame number of the folio. + * @flags: Flags to describe the state of the folio. + * @index: The page offset (pgoff_t) of the folio within the original file. + */ +struct memfd_luo_folio_ser { + u64 pfn:52; + u64 flags:12; + u64 index; +} __packed; + +/** + * struct memfd_luo_ser - Main serialization structure for a memfd. + * @pos: The file's current position (f_pos). + * @size: The total size of the file in bytes (i_size). + * @nr_folios: Number of folios in the folios array. + * @folios: KHO vmalloc descriptor pointing to the array of + * struct memfd_luo_folio_ser. + */ +struct memfd_luo_ser { + u64 pos; + u64 size; + u64 nr_folios; + struct kho_vmalloc folios; +} __packed; + +/* The compatibility string for memfd file handler */ +#define MEMFD_LUO_FH_COMPATIBLE "memfd-v1" + +#endif /* _LINUX_KHO_ABI_MEMFD_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5bd76cf394fa..d93f75b05ae2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1557,6 +1557,8 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); +long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); @@ -2437,18 +2439,6 @@ static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) } #endif /* CONFIG_HAVE_KVM_NO_POLL */ -#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL -long kvm_arch_vcpu_async_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg); -#else -static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, - unsigned int ioctl, - unsigned long arg) -{ - return -ENOIOCTLCMD; -} -#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ - void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE diff --git a/include/linux/libata.h b/include/linux/libata.h index 7a98de1cc995..39534fafa36a 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -75,6 +75,7 @@ enum ata_quirks { __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */ + __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */ __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */ @@ -85,6 +86,45 @@ enum ata_quirks { __ATA_QUIRK_MAX, }; +/* + * Quirk flags: may be set by libata or controller drivers on drives. + * Some quirks may be drive/controller pair dependent. 
+ */ +enum { + ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC), + ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA), + ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ), + ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128), + ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA), + ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE), + ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE), + ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB), + ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR), + ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK), + ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA), + ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN), + ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS), + ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER), + ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA), + ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID), + ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48), + ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR), + ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM), + ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM), + ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM), + ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM), + ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG), + ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM), + ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), + ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191), + ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), + ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), + ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI), + ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), + ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), + ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), +}; + enum { /* various global constants */ LIBATA_MAX_PRD = ATA_MAX_PRD / 2, @@ -390,42 +430,6 @@ enum { */ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, - /* - * Quirk flags: may be set by libata or controller drivers on drives. - * Some quirks may be drive/controller pair dependent. 
- */ - ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC), - ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA), - ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ), - ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128), - ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA), - ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE), - ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE), - ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB), - ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR), - ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK), - ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA), - ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN), - ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS), - ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER), - ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA), - ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID), - ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48), - ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR), - ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM), - ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM), - ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM), - ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM), - ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG), - ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM), - ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), - ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), - ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), - ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI), - ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), - ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), - ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), - /* User visible DMA mask for DMA control. DO NOT renumber. */ ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h new file mode 100644 index 000000000000..a7f6ee5b6771 --- /dev/null +++ b/include/linux/liveupdate.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (c) 2025, Google LLC. + * Pasha Tatashin <pasha.tatashin@soleen.com> + */ +#ifndef _LINUX_LIVEUPDATE_H +#define _LINUX_LIVEUPDATE_H + +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/kho/abi/luo.h> +#include <linux/list.h> +#include <linux/types.h> +#include <uapi/linux/liveupdate.h> + +struct liveupdate_file_handler; +struct file; + +/** + * struct liveupdate_file_op_args - Arguments for file operation callbacks. + * @handler: The file handler being called. + * @retrieved: The retrieve status for the 'can_finish / finish' + * operation. + * @file: The file object. For retrieve: [OUT] The callback sets + * this to the new file. For other ops: [IN] The caller sets + * this to the file being operated on. + * @serialized_data: The opaque u64 handle, preserve/prepare/freeze may update + * this field. + * @private_data: Private data for the file used to hold runtime state that + * is not preserved. Set by the handler's .preserve() + * callback, and must be freed in the handler's + * .unpreserve() callback. + * + * This structure bundles all parameters for the file operation callbacks. + * The 'data' and 'file' fields are used for both input and output. 
+ */ +struct liveupdate_file_op_args { + struct liveupdate_file_handler *handler; + bool retrieved; + struct file *file; + u64 serialized_data; + void *private_data; +}; + +/** + * struct liveupdate_file_ops - Callbacks for live-updatable files. + * @can_preserve: Required. Lightweight check to see if this handler is + * compatible with the given file. + * @preserve: Required. Performs state-saving for the file. + * @unpreserve: Required. Cleans up any resources allocated by @preserve. + * @freeze: Optional. Final actions just before kernel transition. + * @unfreeze: Optional. Undo freeze operations. + * @retrieve: Required. Restores the file in the new kernel. + * @can_finish: Optional. Check if this FD can finish, i.e. all restoration + * pre-requirements for this FD are satisfied. Called prior to + * finish, in order to do successful finish calls for all + * resources in the session. + * @finish: Required. Final cleanup in the new kernel. + * @owner: Module reference + * + * All operations (except can_preserve) receive a pointer to a + * 'struct liveupdate_file_op_args' containing the necessary context. + */ +struct liveupdate_file_ops { + bool (*can_preserve)(struct liveupdate_file_handler *handler, + struct file *file); + int (*preserve)(struct liveupdate_file_op_args *args); + void (*unpreserve)(struct liveupdate_file_op_args *args); + int (*freeze)(struct liveupdate_file_op_args *args); + void (*unfreeze)(struct liveupdate_file_op_args *args); + int (*retrieve)(struct liveupdate_file_op_args *args); + bool (*can_finish)(struct liveupdate_file_op_args *args); + void (*finish)(struct liveupdate_file_op_args *args); + struct module *owner; +}; + +/** + * struct liveupdate_file_handler - Represents a handler for a live-updatable file type. + * @ops: Callback functions + * @compatible: The compatibility string (e.g., "memfd-v1", "vfiofd-v1") + * that uniquely identifies the file type this handler + * supports. This is matched against the compatible string + * associated with individual &struct file instances. + * + * Modules that want to support live update for specific file types should + * register an instance of this structure. LUO uses this registration to + * determine if a given file can be preserved and to find the appropriate + * operations to manage its state across the update. + */ +struct liveupdate_file_handler { + const struct liveupdate_file_ops *ops; + const char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH]; + + /* private: */ + + /* + * Used for linking this handler instance into a global list of + * registered file handlers. 
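To make the registration flow concrete, a sketch under stated assumptions: the "examplefd-v1" handler, its callbacks, and the matching logic are hypothetical, only a subset of the required callbacks is shown, and registration uses liveupdate_register_file_handler() as declared later in this header:

	static bool examplefd_can_preserve(struct liveupdate_file_handler *handler,
					   struct file *file)
	{
		/* Hypothetical: match only files this module knows how to preserve. */
		return false;
	}

	static int examplefd_preserve(struct liveupdate_file_op_args *args)
	{
		args->serialized_data = 0;	/* opaque handle handed to the next kernel */
		return 0;
	}

	static const struct liveupdate_file_ops examplefd_ops = {
		.can_preserve	= examplefd_can_preserve,
		.preserve	= examplefd_preserve,
		/* .unpreserve, .retrieve and .finish are also required in a real handler */
		.owner		= THIS_MODULE,
	};

	static struct liveupdate_file_handler examplefd_handler = {
		.ops		= &examplefd_ops,
		.compatible	= "examplefd-v1",
	};

	static int __init examplefd_init(void)
	{
		return liveupdate_register_file_handler(&examplefd_handler);
	}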
+ */ + struct list_head __private list; +}; + +#ifdef CONFIG_LIVEUPDATE + +/* Return true if live update orchestrator is enabled */ +bool liveupdate_enabled(void); + +/* Called during kexec to tell LUO that entered into reboot */ +int liveupdate_reboot(void); + +int liveupdate_register_file_handler(struct liveupdate_file_handler *fh); +int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh); + +#else /* CONFIG_LIVEUPDATE */ + +static inline bool liveupdate_enabled(void) +{ + return false; +} + +static inline int liveupdate_reboot(void) +{ + return 0; +} + +static inline int liveupdate_register_file_handler(struct liveupdate_file_handler *fh) +{ + return -EOPNOTSUPP; +} + +static inline int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_LIVEUPDATE */ +#endif /* _LINUX_LIVEUPDATE_H */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c8f0f9458f2c..330e38776bb2 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -12,6 +12,7 @@ /* XXX: a lot of this should really be under fs/lockd. */ +#include <linux/exportfs.h> #include <linux/in.h> #include <linux/in6.h> #include <net/ipv6.h> @@ -307,7 +308,7 @@ void nlmsvc_invalidate_all(void); int nlmsvc_unlock_all_by_sb(struct super_block *sb); int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); -static inline struct file *nlmsvc_file_file(struct nlm_file *file) +static inline struct file *nlmsvc_file_file(const struct nlm_file *file) { return file->f_file[O_RDONLY] ? file->f_file[O_RDONLY] : file->f_file[O_WRONLY]; @@ -318,6 +319,12 @@ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) return file_inode(nlmsvc_file_file(file)); } +static inline bool +nlmsvc_file_cannot_lock(const struct nlm_file *file) +{ + return exportfs_cannot_lock(nlmsvc_file_file(file)->f_path.dentry->d_sb->s_export_op); +} + static inline int __nlm_privileged_request4(const struct sockaddr *sap) { const struct sockaddr_in *sin = (struct sockaddr_in *)sap; diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h deleted file mode 100644 index 48e2ff95332f..000000000000 --- a/include/linux/map_benchmark.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2022 HiSilicon Limited. 
- */ - -#ifndef _KERNEL_DMA_BENCHMARK_H -#define _KERNEL_DMA_BENCHMARK_H - -#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) -#define DMA_MAP_MAX_THREADS 1024 -#define DMA_MAP_MAX_SECONDS 300 -#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) - -#define DMA_MAP_BIDIRECTIONAL 0 -#define DMA_MAP_TO_DEVICE 1 -#define DMA_MAP_FROM_DEVICE 2 - -struct map_benchmark { - __u64 avg_map_100ns; /* average map latency in 100ns */ - __u64 map_stddev; /* standard deviation of map latency */ - __u64 avg_unmap_100ns; /* as above */ - __u64 unmap_stddev; - __u32 threads; /* how many threads will do map/unmap in parallel */ - __u32 seconds; /* how long the test will last */ - __s32 node; /* which numa node this benchmark will run on */ - __u32 dma_bits; /* DMA addressing capability */ - __u32 dma_dir; /* DMA data direction */ - __u32 dma_trans_ns; /* time for DMA transmission in ns */ - __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ - __u8 expansion[76]; /* For future use */ -}; -#endif /* _KERNEL_DMA_BENCHMARK_H */ diff --git a/include/linux/math.h b/include/linux/math.h index 0198c92cbe3e..6dc1d1d32fbc 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -148,11 +148,16 @@ __STRUCT_FRACT(u32) /** * abs - return absolute value of an argument - * @x: the value. If it is unsigned type, it is converted to signed type first. - * char is treated as if it was signed (regardless of whether it really is) - * but the macro's return type is preserved as char. + * @x: the value. * - * Return: an absolute value of x. + * If it is unsigned type, @x is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. + * + * NOTE, for signed type if @x is the minimum, the returned result is undefined + * as there is not enough bits to represent it as a positive number. + * + * Return: an absolute value of @x. */ #define abs(x) __abs_choose_expr(x, long long, \ __abs_choose_expr(x, long, \ diff --git a/include/linux/math64.h b/include/linux/math64.h index 6aaccc1626ab..cc305206d89f 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -158,6 +158,17 @@ static inline u64 mul_u32_u32(u32 a, u32 b) } #endif +#ifndef add_u64_u32 +/* + * Many a GCC version also messes this up. + * Zero extending b and then spilling everything to stack. + */ +static inline u64 add_u64_u32(u64 a, u32 b) +{ + return a + b; +} +#endif + #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) #ifndef mul_u64_u32_shr @@ -282,7 +293,53 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } #endif /* mul_u64_u32_div */ -u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); +/** + * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide + * @a: first unsigned 64bit multiplicand + * @b: second unsigned 64bit multiplicand + * @c: unsigned 64bit addend + * @d: unsigned 64bit divisor + * + * Multiply two 64bit values together to generate a 128bit product + * add a third value and then divide by a fourth. + * The Generic code divides by 0 if @d is zero and returns ~0 on overflow. + * Architecture specific code may trap on zero or overflow. 
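A small illustrative use of the division helpers documented here, assuming a hypothetical byte-to-nanosecond conversion where the 128-bit intermediate product matters:

	static u64 bytes_to_ns_roundup(u64 bytes, u64 bytes_per_sec)
	{
		/*
		 * (bytes * NSEC_PER_SEC) can exceed 64 bits, so use the 128-bit
		 * product helper; the roundup variant folds (d - 1) into the
		 * addend before dividing. Divides by zero if bytes_per_sec is 0.
		 */
		return mul_u64_u64_div_u64_roundup(bytes, NSEC_PER_SEC, bytes_per_sec);
	}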
+ * + * Return: (@a * @b + @c) / @d + */ +u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d); + +/** + * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide + * @a: first unsigned 64bit multiplicand + * @b: second unsigned 64bit multiplicand + * @d: unsigned 64bit divisor + * + * Multiply two 64bit values together to generate a 128bit product + * and then divide by a third value. + * The Generic code divides by 0 if @d is zero and returns ~0 on overflow. + * Architecture specific code may trap on zero or overflow. + * + * Return: @a * @b / @d + */ +#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d) + +/** + * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up + * @a: first unsigned 64bit multiplicand + * @b: second unsigned 64bit multiplicand + * @d: unsigned 64bit divisor + * + * Multiply two 64bit values together to generate a 128bit product + * and then divide and round up. + * The Generic code divides by 0 if @d is zero and returns ~0 on overflow. + * Architecture specific code may trap on zero or overflow. + * + * Return: (@a * @b + @d - 1) / @d + */ +#define mul_u64_u64_div_u64_roundup(a, b, d) \ + ({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); }) + /** * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up diff --git a/include/linux/memregion.h b/include/linux/memregion.h index c01321467789..a55f62cc5266 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -26,8 +26,10 @@ static inline void memregion_free(int id) /** * cpu_cache_invalidate_memregion - drop any CPU cached data for - * memregions described by @res_desc - * @res_desc: one of the IORES_DESC_* types + * memregion + * @start: start physical address of the target memory region. + * @len: length of the target memory region. -1 for all the regions of + * the target type. * * Perform cache maintenance after a memory event / operation that * changes the contents of physical memory in a cache-incoherent manner. @@ -46,7 +48,7 @@ static inline void memregion_free(int id) * the cache maintenance. 
*/ #ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION -int cpu_cache_invalidate_memregion(int res_desc); +int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len); bool cpu_cache_has_invalidate_memregion(void); #else static inline bool cpu_cache_has_invalidate_memregion(void) @@ -54,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void) return false; } -static inline int cpu_cache_invalidate_memregion(int res_desc) +static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len) { WARN_ON_ONCE("CPU cache invalidation required"); return -ENXIO; } #endif + +static inline int cpu_cache_invalidate_all(void) +{ + return cpu_cache_invalidate_memregion(0, -1); +} + #endif /* _MEMREGION_H_ */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 6077972e8b45..24eb5a88a5c5 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -867,7 +867,7 @@ struct mhi_device_id { kernel_ulong_t driver_data; }; -#define AUXILIARY_NAME_SIZE 32 +#define AUXILIARY_NAME_SIZE 40 #define AUXILIARY_MODULE_PREFIX "auxiliary:" struct auxiliary_device_id { diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 6907aedc4f74..915f32f7d888 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -26,6 +26,9 @@ /* Generic info of form tag = "info" */ #define MODULE_INFO(tag, info) \ + static_assert( \ + sizeof(info) - 1 == __builtin_strlen(info), \ + "MODULE_INFO(" #tag ", ...) contains embedded NUL byte"); \ static const char __UNIQUE_ID(modinfo)[] \ __used __section(".modinfo") __aligned(1) \ = __MODULE_INFO_PREFIX __stringify(tag) "=" info diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 7ad1f5c7407e..bd38648c998d 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -245,18 +245,18 @@ static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int n } /* FIXME: better would be to fix all architectures to never return - > MAX_NUMNODES, then the silly min_ts could be dropped. */ + > MAX_NUMNODES, then the silly min()s could be dropped. 
*/ #define first_node(src) __first_node(&(src)) static __always_inline unsigned int __first_node(const nodemask_t *srcp) { - return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); + return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); } #define next_node(n, src) __next_node((n), &(src)) static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp) { - return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); + return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } /* @@ -293,8 +293,7 @@ static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node) #define first_unset_node(mask) __first_unset_node(&(mask)) static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp) { - return min_t(unsigned int, MAX_NUMNODES, - find_first_zero_bit(maskp->bits, MAX_NUMNODES)); + return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES)); } #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) diff --git a/include/linux/objtool.h b/include/linux/objtool.h index b18ab53561c9..9a00e701454c 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -12,7 +12,7 @@ #define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ "987: \n\t" \ ".pushsection .discard.unwind_hints\n\t" \ - ANNOTATE_DATA_SPECIAL \ + ANNOTATE_DATA_SPECIAL "\n\t" \ /* struct unwind_hint */ \ ".long 987b - .\n\t" \ ".short " __stringify(sp_offset) "\n\t" \ diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h index 27de7bc32a06..236592c4eeb1 100644 --- a/include/linux/once_lite.h +++ b/include/linux/once_lite.h @@ -16,7 +16,7 @@ bool __ret_cond = !!(condition); \ bool __ret_once = false; \ \ - if (unlikely(__ret_cond && !__already_done)) { \ + if (unlikely(__ret_cond) && unlikely(!__already_done)) {\ __already_done = true; \ __ret_once = true; \ } \ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e601a3144f28..31a848485ad9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -651,9 +651,11 @@ static inline void *detach_page_private(struct page *page) } #ifdef CONFIG_NUMA -struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); +struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *policy); #else -static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) +static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *policy) { return folio_alloc_noprof(gfp, order); } @@ -664,7 +666,7 @@ static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int o static inline struct page *__page_cache_alloc(gfp_t gfp) { - return &filemap_alloc_folio(gfp, 0)->page; + return &filemap_alloc_folio(gfp, 0, NULL)->page; } static inline gfp_t readahead_gfp_mask(struct address_space *x) @@ -750,11 +752,17 @@ static inline fgf_t fgf_set_order(size_t size) } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); -struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, - fgf_t fgp_flags, gfp_t gfp); +struct folio *__filemap_get_folio_mpol(struct address_space *mapping, + pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); +static inline struct folio *__filemap_get_folio(struct address_space *mapping, + pgoff_t index, fgf_t fgf_flags, gfp_t 
gfp) +{ + return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL); +} + /** * write_begin_get_folio - Get folio for write_begin with flags. * @iocb: The kiocb passed from write_begin (may be NULL). diff --git a/include/linux/panic.h b/include/linux/panic.h index 6f972a66c13e..a00bc0937698 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -86,7 +86,6 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) struct taint_flag { char c_true; /* character printed when tainted */ char c_false; /* character printed when not tainted */ - bool module; /* also show as a per-module taint flag */ const char *desc; /* verbose description of the set taint flag */ }; diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h index 1f14aed4354b..bd4346a7c4e7 100644 --- a/include/linux/pci-doe.h +++ b/include/linux/pci-doe.h @@ -15,6 +15,10 @@ struct pci_doe_mb; +#define PCI_DOE_FEATURE_DISCOVERY 0 +#define PCI_DOE_FEATURE_CMA 1 +#define PCI_DOE_FEATURE_SSESSION 2 + struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor, u8 type); diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h new file mode 100644 index 000000000000..37a1ad9501b0 --- /dev/null +++ b/include/linux/pci-ide.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common helpers for drivers (e.g. low-level PCI/TSM drivers) implementing the + * IDE key management protocol (IDE_KM) as defined by: + * PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) + * + * Copyright(c) 2024-2025 Intel Corporation. All rights reserved. + */ + +#ifndef __PCI_IDE_H__ +#define __PCI_IDE_H__ + +enum pci_ide_partner_select { + PCI_IDE_EP, + PCI_IDE_RP, + PCI_IDE_PARTNER_MAX, + /* + * In addition to the resources in each partner port the + * platform / host-bridge additionally has a Stream ID pool that + * it shares across root ports. Let pci_ide_stream_alloc() use + * the alloc_stream_index() helper as endpoints and root ports. + */ + PCI_IDE_HB = PCI_IDE_PARTNER_MAX, +}; + +/** + * struct pci_ide_partner - Per port pair Selective IDE Stream settings + * @rid_start: Partner Port Requester ID range start + * @rid_end: Partner Port Requester ID range end + * @stream_index: Selective IDE Stream Register Block selection + * @mem_assoc: PCI bus memory address association for targeting peer partner + * @pref_assoc: PCI bus prefetchable memory address association for + * targeting peer partner + * @default_stream: Endpoint uses this stream for all upstream TLPs regardless of + * address and RID association registers + * @setup: flag to track whether to run pci_ide_stream_teardown() for this + * partner slot + * @enable: flag whether to run pci_ide_stream_disable() for this partner slot + * + * By default, pci_ide_stream_alloc() initializes @mem_assoc and @pref_assoc + * with the immediate ancestor downstream port memory ranges (i.e. Type 1 + * Configuration Space Header values). Caller may zero size ({0, -1}) the range + * to drop it from consideration at pci_ide_stream_setup() time. 
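As an illustrative call sequence only (the unwind ordering on error is an assumption), a low-level driver might bring up a Selective IDE Stream with the helpers declared at the end of this header:

	static int example_ide_bringup(struct pci_dev *pdev, struct pci_ide **out)
	{
		struct pci_ide *ide;
		int ret;

		ide = pci_ide_stream_alloc(pdev);
		if (!ide)
			return -ENOMEM;

		ret = pci_ide_stream_register(ide);
		if (ret)
			goto err_free;

		pci_ide_stream_setup(pdev, ide);
		ret = pci_ide_stream_enable(pdev, ide);
		if (ret)
			goto err_teardown;

		*out = ide;
		return 0;

	err_teardown:
		pci_ide_stream_teardown(pdev, ide);
		pci_ide_stream_unregister(ide);
	err_free:
		pci_ide_stream_free(ide);
		return ret;
	}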
+ */ +struct pci_ide_partner { + u16 rid_start; + u16 rid_end; + u8 stream_index; + struct pci_bus_region mem_assoc; + struct pci_bus_region pref_assoc; + unsigned int default_stream:1; + unsigned int setup:1; + unsigned int enable:1; +}; + +/** + * struct pci_ide_regs - Hardware register association settings for Selective + * IDE Streams + * @rid1: IDE RID Association Register 1 + * @rid2: IDE RID Association Register 2 + * @addr: Up to two address association blocks (IDE Address Association Register + * 1 through 3) for MMIO and prefetchable MMIO + * @nr_addr: Number of address association blocks initialized + * + * See pci_ide_stream_to_regs() + */ +struct pci_ide_regs { + u32 rid1; + u32 rid2; + struct { + u32 assoc1; + u32 assoc2; + u32 assoc3; + } addr[2]; + int nr_addr; +}; + +/** + * struct pci_ide - PCIe Selective IDE Stream descriptor + * @pdev: PCIe Endpoint in the pci_ide_partner pair + * @partner: per-partner settings + * @host_bridge_stream: allocated from host bridge @ide_stream_ida pool + * @stream_id: unique Stream ID (within Partner Port pairing) + * @name: name of the established Selective IDE Stream in sysfs + * @tsm_dev: For TSM established IDE, the TSM device context + * + * Negative @stream_id values indicate "uninitialized" on the + * expectation that with TSM established IDE the TSM owns the stream_id + * allocation. + */ +struct pci_ide { + struct pci_dev *pdev; + struct pci_ide_partner partner[PCI_IDE_PARTNER_MAX]; + u8 host_bridge_stream; + int stream_id; + const char *name; + struct tsm_dev *tsm_dev; +}; + +/* + * Some devices need help with aliased stream-ids even for idle streams. Use + * this id as the "never enabled" place holder. + */ +#define PCI_IDE_RESERVED_STREAM_ID 255 + +void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr); +struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, + struct pci_ide *ide); +struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev); +void pci_ide_stream_free(struct pci_ide *ide); +int pci_ide_stream_register(struct pci_ide *ide); +void pci_ide_stream_unregister(struct pci_ide *ide); +void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide); +void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide); +int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide); +void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide); +void pci_ide_stream_release(struct pci_ide *ide); +DEFINE_FREE(pci_ide_stream_release, struct pci_ide *, if (_T) pci_ide_stream_release(_T)) +#endif /* __PCI_IDE_H__ */ diff --git a/include/linux/pci-tsm.h b/include/linux/pci-tsm.h new file mode 100644 index 000000000000..a6435aba03f9 --- /dev/null +++ b/include/linux/pci-tsm.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PCI_TSM_H +#define __PCI_TSM_H +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/sockptr.h> + +struct pci_tsm; +struct tsm_dev; +struct kvm; +enum pci_tsm_req_scope; + +/* + * struct pci_tsm_ops - manage confidential links and security state + * @link_ops: Coordinate PCIe SPDM and IDE establishment via a platform TSM. + * Provide a secure session transport for TDISP state management + * (typically bare metal physical function operations). + * @devsec_ops: Lock, unlock, and interrogate the security state of the + * function via the platform TSM (typically virtual function + * operations). 
+ * + * This operations are mutually exclusive either a tsm_dev instance + * manages physical link properties or it manages function security + * states like TDISP lock/unlock. + */ +struct pci_tsm_ops { + /* + * struct pci_tsm_link_ops - Manage physical link and the TSM/DSM session + * @probe: establish context with the TSM (allocate / wrap 'struct + * pci_tsm') for follow-on link operations + * @remove: destroy link operations context + * @connect: establish / validate a secure connection (e.g. IDE) + * with the device + * @disconnect: teardown the secure link + * @bind: bind a TDI in preparation for it to be accepted by a TVM + * @unbind: remove a TDI from secure operation with a TVM + * @guest_req: marshal TVM information and state change requests + * + * Context: @probe, @remove, @connect, and @disconnect run under + * pci_tsm_rwsem held for write to sync with TSM unregistration and + * mutual exclusion of @connect and @disconnect. @connect and + * @disconnect additionally run under the DSM lock (struct + * pci_tsm_pf0::lock) as well as @probe and @remove of the subfunctions. + * @bind, @unbind, and @guest_req run under pci_tsm_rwsem held for read + * and the DSM lock. + */ + struct_group_tagged(pci_tsm_link_ops, link_ops, + struct pci_tsm *(*probe)(struct tsm_dev *tsm_dev, + struct pci_dev *pdev); + void (*remove)(struct pci_tsm *tsm); + int (*connect)(struct pci_dev *pdev); + void (*disconnect)(struct pci_dev *pdev); + struct pci_tdi *(*bind)(struct pci_dev *pdev, + struct kvm *kvm, u32 tdi_id); + void (*unbind)(struct pci_tdi *tdi); + ssize_t (*guest_req)(struct pci_tdi *tdi, + enum pci_tsm_req_scope scope, + sockptr_t req_in, size_t in_len, + sockptr_t req_out, size_t out_len, + u64 *tsm_code); + ); + + /* + * struct pci_tsm_devsec_ops - Manage the security state of the function + * @lock: establish context with the TSM (allocate / wrap 'struct + * pci_tsm') for follow-on security state transitions from the + * LOCKED state + * @unlock: destroy TSM context and return device to UNLOCKED state + * + * Context: @lock and @unlock run under pci_tsm_rwsem held for write to + * sync with TSM unregistration and each other + */ + struct_group_tagged(pci_tsm_devsec_ops, devsec_ops, + struct pci_tsm *(*lock)(struct tsm_dev *tsm_dev, + struct pci_dev *pdev); + void (*unlock)(struct pci_tsm *tsm); + ); +}; + +/** + * struct pci_tdi - Core TEE I/O Device Interface (TDI) context + * @pdev: host side representation of guest-side TDI + * @kvm: TEE VM context of bound TDI + * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM + */ +struct pci_tdi { + struct pci_dev *pdev; + struct kvm *kvm; + u32 tdi_id; +}; + +/** + * struct pci_tsm - Core TSM context for a given PCIe endpoint + * @pdev: Back ref to device function, distinguishes type of pci_tsm context + * @dsm_dev: PCI Device Security Manager for link operations on @pdev + * @tsm_dev: PCI TEE Security Manager device for Link Confidentiality or Device + * Function Security operations + * @tdi: TDI context established by the @bind link operation + * + * This structure is wrapped by low level TSM driver data and returned by + * probe()/lock(), it is freed by the corresponding remove()/unlock(). + * + * For link operations it serves to cache the association between a Device + * Security Manager (DSM) and the functions that manager can assign to a TVM. 
+ * That can be "self", for assigning function0 of a TEE I/O device, a + * sub-function (SR-IOV virtual function, or non-function0 + * multifunction-device), or a downstream endpoint (PCIe upstream switch-port as + * DSM). + */ +struct pci_tsm { + struct pci_dev *pdev; + struct pci_dev *dsm_dev; + struct tsm_dev *tsm_dev; + struct pci_tdi *tdi; +}; + +/** + * struct pci_tsm_pf0 - Physical Function 0 TDISP link context + * @base_tsm: generic core "tsm" context + * @lock: mutual exclustion for pci_tsm_ops invocation + * @doe_mb: PCIe Data Object Exchange mailbox + */ +struct pci_tsm_pf0 { + struct pci_tsm base_tsm; + struct mutex lock; + struct pci_doe_mb *doe_mb; +}; + +/* physical function0 and capable of 'connect' */ +static inline bool is_pci_tsm_pf0(struct pci_dev *pdev) +{ + if (!pdev) + return false; + + if (!pci_is_pcie(pdev)) + return false; + + if (pdev->is_virtfn) + return false; + + /* + * Allow for a Device Security Manager (DSM) associated with function0 + * of an Endpoint to coordinate TDISP requests for other functions + * (physical or virtual) of the device, or allow for an Upstream Port + * DSM to accept TDISP requests for the Endpoints downstream of the + * switch. + */ + switch (pci_pcie_type(pdev)) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_RC_END: + if (pdev->ide_cap || (pdev->devcap & PCI_EXP_DEVCAP_TEE)) + break; + fallthrough; + default: + return false; + } + + return PCI_FUNC(pdev->devfn) == 0; +} + +/** + * enum pci_tsm_req_scope - Scope of guest requests to be validated by TSM + * + * Guest requests are a transport for a TVM to communicate with a TSM + DSM for + * a given TDI. A TSM driver is responsible for maintaining the kernel security + * model and limit commands that may affect the host, or are otherwise outside + * the typical TDISP operational model. + */ +enum pci_tsm_req_scope { + /** + * @PCI_TSM_REQ_INFO: Read-only, without side effects, request for + * typical TDISP collateral information like Device Interface Reports. + * No device secrets are permitted, and no device state is changed. + */ + PCI_TSM_REQ_INFO = 0, + /** + * @PCI_TSM_REQ_STATE_CHANGE: Request to change the TDISP state from + * UNLOCKED->LOCKED, LOCKED->RUN, or other architecture specific state + * changes to support those transitions for a TDI. No other (unrelated + * to TDISP) device / host state, configuration, or data change is + * permitted. + */ + PCI_TSM_REQ_STATE_CHANGE = 1, + /** + * @PCI_TSM_REQ_DEBUG_READ: Read-only request for debug information + * + * A method to facilitate TVM information retrieval outside of typical + * TDISP operational requirements. No device secrets are permitted. + */ + PCI_TSM_REQ_DEBUG_READ = 2, + /** + * @PCI_TSM_REQ_DEBUG_WRITE: Device state changes for debug purposes + * + * The request may affect the operational state of the device outside of + * the TDISP operational model. If allowed, requires CAP_SYS_RAW_IO, and + * will taint the kernel. 
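For orientation, a hedged sketch of the bind/unbind entry points declared under CONFIG_PCI_TSM below; the caller and its teardown policy are hypothetical:

	static int example_assign_tdi(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
	{
		int ret = pci_tsm_bind(pdev, kvm, tdi_id);

		if (ret)	/* e.g. -ENXIO when no TSM driver is registered */
			return ret;

		/* ... the TVM runs with the TDI locked / in RUN state ... */

		pci_tsm_unbind(pdev);
		return 0;
	}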
+ */ + PCI_TSM_REQ_DEBUG_WRITE = 3, +}; + +#ifdef CONFIG_PCI_TSM +int pci_tsm_register(struct tsm_dev *tsm_dev); +void pci_tsm_unregister(struct tsm_dev *tsm_dev); +int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm, + struct tsm_dev *tsm_dev); +int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm, + struct tsm_dev *tsm_dev); +void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *tsm); +int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req, + size_t req_sz, void *resp, size_t resp_sz); +int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id); +void pci_tsm_unbind(struct pci_dev *pdev); +void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi, + struct kvm *kvm, u32 tdi_id); +ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope, + sockptr_t req_in, size_t in_len, sockptr_t req_out, + size_t out_len, u64 *tsm_code); +#else +static inline int pci_tsm_register(struct tsm_dev *tsm_dev) +{ + return 0; +} +static inline void pci_tsm_unregister(struct tsm_dev *tsm_dev) +{ +} +static inline int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u64 tdi_id) +{ + return -ENXIO; +} +static inline void pci_tsm_unbind(struct pci_dev *pdev) +{ +} +static inline ssize_t pci_tsm_guest_req(struct pci_dev *pdev, + enum pci_tsm_req_scope scope, + sockptr_t req_in, size_t in_len, + sockptr_t req_out, size_t out_len, + u64 *tsm_code) +{ + return -ENXIO; +} +#endif +#endif /*__PCI_TSM_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index b16127c6a7b4..864775651c6f 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -452,6 +452,7 @@ struct pci_dev { unsigned int pasid_enabled:1; /* Process Address Space ID */ unsigned int pri_enabled:1; /* Page Request Interface */ unsigned int tph_enabled:1; /* TLP Processing Hints */ + unsigned int fm_enabled:1; /* Flit Mode (segment captured) */ unsigned int is_managed:1; /* Managed via devres */ unsigned int is_msi_managed:1; /* MSI release via devres installed */ unsigned int needs_freset:1; /* Requires fundamental reset */ @@ -544,6 +545,18 @@ struct pci_dev { #ifdef CONFIG_PCI_NPEM struct npem *npem; /* Native PCIe Enclosure Management */ #endif +#ifdef CONFIG_PCI_IDE + u16 ide_cap; /* Link Integrity & Data Encryption */ + u8 nr_ide_mem; /* Address association resources for streams */ + u8 nr_link_ide; /* Link Stream count (Selective Stream offset) */ + u16 nr_sel_ide; /* Selective Stream count (register block allocator) */ + struct ida ide_stream_ida; + unsigned int ide_cfg:1; /* Config cycles over IDE */ + unsigned int ide_tee_limit:1; /* Disallow T=0 traffic over IDE */ +#endif +#ifdef CONFIG_PCI_TSM + struct pci_tsm *tsm; /* TSM operation state */ +#endif u16 acs_cap; /* ACS Capability offset */ u8 supported_speeds; /* Supported Link Speeds Vector */ phys_addr_t rom; /* Physical address if not from BAR */ @@ -579,6 +592,8 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus); #define to_pci_dev(n) container_of(n, struct pci_dev, dev) #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) +#define for_each_pci_dev_reverse(d) \ + while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) static inline int pci_channel_offline(struct pci_dev *pdev) { @@ -605,6 +620,11 @@ struct pci_host_bridge { int domain_nr; struct list_head windows; /* resource_entry */ struct list_head dma_ranges; /* dma ranges resource list */ +#ifdef CONFIG_PCI_IDE + u16 nr_ide_streams; /* Max streams possibly active in 
@ide_stream_ida */ + struct ida ide_stream_ida; + struct ida ide_stream_ids_ida; /* track unique ids per domain */ +#endif u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); @@ -857,6 +877,11 @@ struct pci_bus_region { pci_bus_addr_t end; }; +static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region) +{ + return region->end - region->start + 1; +} + struct pci_dynids { spinlock_t lock; /* Protects list, index */ struct list_head list; /* For IDs added at runtime */ @@ -1241,6 +1266,8 @@ u64 pci_get_dsn(struct pci_dev *dev); struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); +struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device, + struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from); @@ -1660,6 +1687,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); +void pci_walk_bus_reverse(struct pci_bus *top, + int (*cb)(struct pci_dev *, void *), void *userdata); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); resource_size_t pcibios_window_alignment(struct pci_bus *bus, @@ -2055,6 +2084,11 @@ static inline struct pci_dev *pci_get_device(unsigned int vendor, struct pci_dev *from) { return NULL; } +static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor, + unsigned int device, + struct pci_dev *from) +{ return NULL; } + static inline struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h deleted file mode 100644 index 879f5c78b91a..000000000000 --- a/include/linux/platform_data/usb-davinci.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * USB related definitions - * - * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef __ASM_ARCH_USB_H -#define __ASM_ARCH_USB_H - -/* Passed as the platform data to the OHCI driver */ -struct da8xx_ohci_root_hub { - /* Time from power on to power good (in 2 ms units) */ - u8 potpgt; -}; - -void davinci_setup_usb(unsigned mA, unsigned potpgt_ms); - -#endif /* ifndef __ASM_ARCH_USB_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 93c945331f39..813da101b5bf 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -80,7 +80,7 @@ static inline void __iomem * devm_platform_get_and_ioremap_resource(struct platform_device *pdev, unsigned int index, struct resource **res) { - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } @@ -88,14 +88,14 @@ static inline void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index) { - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } static inline void __iomem * devm_platform_ioremap_resource_byname(struct platform_device *pdev, const char *name) { - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } #endif diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index e0dbcb4b4fd9..69ffa4b4d1fa 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -14,6 +14,39 @@ #include <uapi/linux/psp-sev.h> +/* As defined by SEV API, under "Guest Policy". */ +#define SEV_POLICY_MASK_NODBG BIT(0) +#define SEV_POLICY_MASK_NOKS BIT(1) +#define SEV_POLICY_MASK_ES BIT(2) +#define SEV_POLICY_MASK_NOSEND BIT(3) +#define SEV_POLICY_MASK_DOMAIN BIT(4) +#define SEV_POLICY_MASK_SEV BIT(5) +#define SEV_POLICY_MASK_API_MAJOR GENMASK(23, 16) +#define SEV_POLICY_MASK_API_MINOR GENMASK(31, 24) + +/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */ +#define SNP_POLICY_MASK_API_MINOR GENMASK_ULL(7, 0) +#define SNP_POLICY_MASK_API_MAJOR GENMASK_ULL(15, 8) +#define SNP_POLICY_MASK_SMT BIT_ULL(16) +#define SNP_POLICY_MASK_RSVD_MBO BIT_ULL(17) +#define SNP_POLICY_MASK_MIGRATE_MA BIT_ULL(18) +#define SNP_POLICY_MASK_DEBUG BIT_ULL(19) +#define SNP_POLICY_MASK_SINGLE_SOCKET BIT_ULL(20) +#define SNP_POLICY_MASK_CXL_ALLOW BIT_ULL(21) +#define SNP_POLICY_MASK_MEM_AES_256_XTS BIT_ULL(22) +#define SNP_POLICY_MASK_RAPL_DIS BIT_ULL(23) +#define SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM BIT_ULL(24) +#define SNP_POLICY_MASK_PAGE_SWAP_DISABLE BIT_ULL(25) + +/* Base SEV-SNP policy bitmask for minimum supported SEV firmware version */ +#define SNP_POLICY_MASK_BASE (SNP_POLICY_MASK_API_MINOR | \ + SNP_POLICY_MASK_API_MAJOR | \ + SNP_POLICY_MASK_SMT | \ + SNP_POLICY_MASK_RSVD_MBO | \ + SNP_POLICY_MASK_MIGRATE_MA | \ + SNP_POLICY_MASK_DEBUG | \ + SNP_POLICY_MASK_SINGLE_SOCKET) + #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ /** @@ -109,6 +142,13 @@ enum sev_cmd { SEV_CMD_SNP_VLEK_LOAD = 0x0CD, SEV_CMD_SNP_FEATURE_INFO = 0x0CE, + /* SEV-TIO commands */ + SEV_CMD_TIO_STATUS = 0x0D0, + SEV_CMD_TIO_INIT = 0x0D1, + SEV_CMD_TIO_DEV_CREATE = 0x0D2, + SEV_CMD_TIO_DEV_RECLAIM = 0x0D3, + SEV_CMD_TIO_DEV_CONNECT = 0x0D4, + SEV_CMD_TIO_DEV_DISCONNECT = 0x0D5, SEV_CMD_MAX, }; @@ -750,7 +790,8 @@ struct sev_data_snp_init_ex { u32 list_paddr_en:1; u32 rapl_dis:1; u32 ciphertext_hiding_en:1; - u32 rsvd:28; + u32 tio_en:1; + u32 rsvd:27; u32 rsvd1; u64 list_paddr; u16 max_snp_asid; @@ -849,7 +890,14 @@ struct snp_feature_info { u32 edx; } __packed; +/* Feature bits in ECX */ +#define SNP_RAPL_DISABLE_SUPPORTED BIT(2) #define SNP_CIPHER_TEXT_HIDING_SUPPORTED BIT(3) +#define SNP_AES_256_XTS_POLICY_SUPPORTED BIT(4) +#define 
SNP_CXL_ALLOW_POLICY_SUPPORTED BIT(5) + +/* Feature bits in EBX */ +#define SNP_SEV_TIO_SUPPORTED BIT(1) #ifdef CONFIG_CRYPTO_DEV_SP_PSP @@ -992,9 +1040,11 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret); void *psp_copy_user_blob(u64 uaddr, u32 len); void *snp_alloc_firmware_page(gfp_t mask); +int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked); void snp_free_firmware_page(void *addr); void sev_platform_shutdown(void); bool sev_is_snp_ciphertext_hiding_supported(void); +u64 sev_get_snp_policy_bits(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ @@ -1027,6 +1077,11 @@ static inline void *snp_alloc_firmware_page(gfp_t mask) return NULL; } +static inline int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) +{ + return -ENODEV; +} + static inline void snp_free_firmware_page(void *addr) { } static inline void sev_platform_shutdown(void) { } diff --git a/include/linux/raspberrypi/vchiq.h b/include/linux/raspberrypi/vchiq.h new file mode 100644 index 000000000000..ee4469f4fc51 --- /dev/null +++ b/include/linux/raspberrypi/vchiq.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */ + +#ifndef VCHIQ_H +#define VCHIQ_H + +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \ + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3)) + +enum vchiq_reason { + VCHIQ_SERVICE_OPENED, /* service, -, - */ + VCHIQ_SERVICE_CLOSED, /* service, -, - */ + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */ + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */ + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */ + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */ + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */ +}; + +enum vchiq_bulk_mode { + VCHIQ_BULK_MODE_CALLBACK, + VCHIQ_BULK_MODE_BLOCKING, + VCHIQ_BULK_MODE_NOCALLBACK, + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */ +}; + +enum vchiq_service_option { + VCHIQ_SERVICE_OPTION_AUTOCLOSE, + VCHIQ_SERVICE_OPTION_SLOT_QUOTA, + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA, + VCHIQ_SERVICE_OPTION_SYNCHRONOUS, + VCHIQ_SERVICE_OPTION_TRACE +}; + +struct vchiq_header { + /* The message identifier - opaque to applications. */ + int msgid; + + /* Size of message data. 
*/ + unsigned int size; + + char data[]; /* message */ +}; + +struct vchiq_element { + const void __user *data; + unsigned int size; +}; + +struct vchiq_instance; +struct vchiq_state; + +struct vchiq_service_base { + int fourcc; + int (*callback)(struct vchiq_instance *instance, + enum vchiq_reason reason, + struct vchiq_header *header, + unsigned int handle, + void *cb_data, void __user *cb_userdata); + void *userdata; +}; + +struct vchiq_completion_data_kernel { + enum vchiq_reason reason; + struct vchiq_header *header; + void *service_userdata; + void *cb_data; + void __user *cb_userdata; +}; + +struct vchiq_service_params_kernel { + int fourcc; + int (*callback)(struct vchiq_instance *instance, + enum vchiq_reason reason, + struct vchiq_header *header, + unsigned int handle, + void *cb_data, void __user *cb_userdata); + void *userdata; + short version; /* Increment for non-trivial changes */ + short version_min; /* Update for incompatible changes */ +}; + +extern int vchiq_initialise(struct vchiq_state *state, + struct vchiq_instance **pinstance); +extern int vchiq_shutdown(struct vchiq_instance *instance); +extern int vchiq_connect(struct vchiq_instance *instance); +extern int vchiq_open_service(struct vchiq_instance *instance, + const struct vchiq_service_params_kernel *params, + unsigned int *pservice); +extern int vchiq_close_service(struct vchiq_instance *instance, + unsigned int service); +extern int vchiq_use_service(struct vchiq_instance *instance, unsigned int service); +extern int vchiq_release_service(struct vchiq_instance *instance, + unsigned int service); +extern void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header); +extern void vchiq_release_message(struct vchiq_instance *instance, unsigned int service, + struct vchiq_header *header); +extern int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, + void *data, unsigned int size); +extern int vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service, + const void *data, unsigned int size, void *userdata, + enum vchiq_bulk_mode mode); +extern int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service, + void *data, unsigned int size, void *userdata, + enum vchiq_bulk_mode mode); +extern void *vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int service); +extern int vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, + short *peer_version); +extern struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle); + +#endif /* VCHIQ_H */ diff --git a/include/linux/raspberrypi/vchiq_arm.h b/include/linux/raspberrypi/vchiq_arm.h new file mode 100644 index 000000000000..e32b02f99024 --- /dev/null +++ b/include/linux/raspberrypi/vchiq_arm.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
+ */ + +#ifndef VCHIQ_ARM_H +#define VCHIQ_ARM_H + +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/semaphore.h> +#include <linux/atomic.h> +#include "vchiq_core.h" +#include "vchiq_debugfs.h" + +/* Some per-instance constants */ +#define MAX_COMPLETIONS 128 +#define MAX_SERVICES 64 +#define MAX_ELEMENTS 8 +#define MSG_QUEUE_SIZE 128 + +#define VCHIQ_DRV_MAX_CALLBACKS 10 + +struct rpi_firmware; +struct vchiq_device; + +enum USE_TYPE_E { + USE_TYPE_SERVICE, + USE_TYPE_VCHIQ +}; + +struct vchiq_platform_info { + unsigned int cache_line_size; +}; + +struct vchiq_drv_mgmt { + struct rpi_firmware *fw; + const struct vchiq_platform_info *info; + + bool connected; + int num_deferred_callbacks; + /* Protects connected and num_deferred_callbacks */ + struct mutex connected_mutex; + + void (*deferred_callback[VCHIQ_DRV_MAX_CALLBACKS])(void); + + struct semaphore free_fragments_sema; + struct semaphore free_fragments_mutex; + char *fragments_base; + char *free_fragments; + unsigned int fragments_size; + + void __iomem *regs; + + struct vchiq_state state; +}; + +struct user_service { + struct vchiq_service *service; + void __user *userdata; + struct vchiq_instance *instance; + char is_vchi; + char dequeue_pending; + char close_pending; + int message_available_pos; + int msg_insert; + int msg_remove; + struct completion insert_event; + struct completion remove_event; + struct completion close_event; + struct vchiq_header *msg_queue[MSG_QUEUE_SIZE]; +}; + +struct bulk_waiter_node { + struct bulk_waiter bulk_waiter; + int pid; + struct list_head list; +}; + +struct vchiq_instance { + struct vchiq_state *state; + struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS]; + int completion_insert; + int completion_remove; + struct completion insert_event; + struct completion remove_event; + struct mutex completion_mutex; + + int connected; + int closing; + int pid; + int mark; + int use_close_delivered; + int trace; + + struct list_head bulk_waiter_list; + struct mutex bulk_waiter_list_mutex; + + struct vchiq_debugfs_node debugfs_node; +}; + +int +vchiq_use_service(struct vchiq_instance *instance, unsigned int handle); + +extern int +vchiq_release_service(struct vchiq_instance *instance, unsigned int handle); + +extern int +vchiq_check_service(struct vchiq_service *service); + +extern void +vchiq_dump_service_use_state(struct vchiq_state *state); + +extern int +vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, + enum USE_TYPE_E use_type); +extern int +vchiq_release_internal(struct vchiq_state *state, + struct vchiq_service *service); + +extern struct vchiq_debugfs_node * +vchiq_instance_get_debugfs_node(struct vchiq_instance *instance); + +extern int +vchiq_instance_get_use_count(struct vchiq_instance *instance); + +extern int +vchiq_instance_get_pid(struct vchiq_instance *instance); + +extern int +vchiq_instance_get_trace(struct vchiq_instance *instance); + +extern void +vchiq_instance_set_trace(struct vchiq_instance *instance, int trace); + +extern void +vchiq_add_connected_callback(struct vchiq_device *device, + void (*callback)(void)); + +#if IS_ENABLED(CONFIG_VCHIQ_CDEV) + +extern void +vchiq_deregister_chrdev(void); + +extern int +vchiq_register_chrdev(struct device *parent); + +#else + +static inline void vchiq_deregister_chrdev(void) { } +static inline int vchiq_register_chrdev(struct device *parent) { return 0; } + +#endif /* IS_ENABLED(CONFIG_VCHIQ_CDEV) */ + +extern int +service_callback(struct vchiq_instance *vchiq_instance, enum 
vchiq_reason reason, + struct vchiq_header *header, unsigned int handle, + void *cb_data, void __user *cb_userdata); + +extern void +free_bulk_waiter(struct vchiq_instance *instance); + +#endif /* VCHIQ_ARM_H */ diff --git a/include/linux/raspberrypi/vchiq_bus.h b/include/linux/raspberrypi/vchiq_bus.h new file mode 100644 index 000000000000..9de179b39f85 --- /dev/null +++ b/include/linux/raspberrypi/vchiq_bus.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Ideas On Board Oy + */ + +#ifndef _VCHIQ_DEVICE_H +#define _VCHIQ_DEVICE_H + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +struct vchiq_drv_mgmt; + +struct vchiq_device { + struct device dev; + struct vchiq_drv_mgmt *drv_mgmt; +}; + +struct vchiq_driver { + int (*probe)(struct vchiq_device *device); + void (*remove)(struct vchiq_device *device); + int (*resume)(struct vchiq_device *device); + int (*suspend)(struct vchiq_device *device, + pm_message_t state); + + const struct vchiq_device_id *id_table; + struct device_driver driver; +}; + +static inline struct vchiq_device *to_vchiq_device(struct device *d) +{ + return container_of(d, struct vchiq_device, dev); +} + +static inline struct vchiq_driver *to_vchiq_driver(struct device_driver *d) +{ + return container_of(d, struct vchiq_driver, driver); +} + +extern const struct bus_type vchiq_bus_type; + +struct vchiq_device * +vchiq_device_register(struct device *parent, const char *name); +void vchiq_device_unregister(struct vchiq_device *dev); + +int vchiq_driver_register(struct vchiq_driver *vchiq_drv); +void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv); + +/** + * module_vchiq_driver() - Helper macro for registering a vchiq driver + * @__vchiq_driver: vchiq driver struct + * + * Helper macro for vchiq drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_vchiq_driver(__vchiq_driver) \ + module_driver(__vchiq_driver, vchiq_driver_register, vchiq_driver_unregister) + +#endif /* _VCHIQ_DEVICE_H */ diff --git a/include/linux/raspberrypi/vchiq_cfg.h b/include/linux/raspberrypi/vchiq_cfg.h new file mode 100644 index 000000000000..a16d0299996c --- /dev/null +++ b/include/linux/raspberrypi/vchiq_cfg.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2010-2014 Broadcom. All rights reserved. 
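As a usage illustration for the vchiq bus API above, a minimal sketch of a client driver pairing an id_table with module_vchiq_driver(); the service name, driver name, and probe/remove bodies are hypothetical placeholders.

static int example_probe(struct vchiq_device *device)
{
	/* open services via device->drv_mgmt state, etc. */
	return 0;
}

static void example_remove(struct vchiq_device *device)
{
}

static const struct vchiq_device_id example_id_table[] = {
	{ .name = "example-service" },
	{ }
};

static struct vchiq_driver example_driver = {
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_id_table,
	.driver		= {
		.name	= "example-vchiq-driver",
	},
};
module_vchiq_driver(example_driver);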
*/ + +#ifndef VCHIQ_CFG_H +#define VCHIQ_CFG_H + +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I') +/* The version of VCHIQ - change with any non-trivial change */ +#define VCHIQ_VERSION 8 +/* + * The minimum compatible version - update to match VCHIQ_VERSION with any + * incompatible change + */ +#define VCHIQ_VERSION_MIN 3 + +/* The version that introduced the VCHIQ_IOC_LIB_VERSION ioctl */ +#define VCHIQ_VERSION_LIB_VERSION 7 + +/* The version that introduced the VCHIQ_IOC_CLOSE_DELIVERED ioctl */ +#define VCHIQ_VERSION_CLOSE_DELIVERED 7 + +/* The version that made it safe to use SYNCHRONOUS mode */ +#define VCHIQ_VERSION_SYNCHRONOUS_MODE 8 + +#define VCHIQ_MAX_STATES 1 +#define VCHIQ_MAX_SERVICES 4096 +#define VCHIQ_MAX_SLOTS 128 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64 + +#define VCHIQ_NUM_CURRENT_BULKS 32 +#define VCHIQ_NUM_SERVICE_BULKS 4 + +#ifndef VCHIQ_ENABLE_DEBUG +#define VCHIQ_ENABLE_DEBUG 1 +#endif + +#ifndef VCHIQ_ENABLE_STATS +#define VCHIQ_ENABLE_STATS 1 +#endif + +#endif /* VCHIQ_CFG_H */ diff --git a/include/linux/raspberrypi/vchiq_core.h b/include/linux/raspberrypi/vchiq_core.h new file mode 100644 index 000000000000..e7bf7a114985 --- /dev/null +++ b/include/linux/raspberrypi/vchiq_core.h @@ -0,0 +1,646 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */ + +#ifndef VCHIQ_CORE_H +#define VCHIQ_CORE_H + +#include <linux/mutex.h> +#include <linux/completion.h> +#include <linux/dma-mapping.h> +#include <linux/dev_printk.h> +#include <linux/kthread.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> +#include <linux/seq_file.h> +#include <linux/spinlock_types.h> +#include <linux/wait.h> + +#include "vchiq.h" +#include "vchiq_cfg.h" + +/* Do this so that we can test-build the code on non-rpi systems */ +#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) + +#else + +#ifndef dsb +#define dsb(a) +#endif + +#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */ + +#define VCHIQ_SERVICE_HANDLE_INVALID 0 + +#define VCHIQ_SLOT_SIZE 4096 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header)) + +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1) +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1) +#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \ + VCHIQ_SLOT_SIZE) + +#define BITSET_SIZE(b) ((b + 31) >> 5) +#define BITSET_WORD(b) (b >> 5) +#define BITSET_BIT(b) (1 << (b & 31)) +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b)) +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b)) + +enum { + DEBUG_ENTRIES, +#if VCHIQ_ENABLE_DEBUG + DEBUG_SLOT_HANDLER_COUNT, + DEBUG_SLOT_HANDLER_LINE, + DEBUG_PARSE_LINE, + DEBUG_PARSE_HEADER, + DEBUG_PARSE_MSGID, + DEBUG_AWAIT_COMPLETION_LINE, + DEBUG_DEQUEUE_MESSAGE_LINE, + DEBUG_SERVICE_CALLBACK_LINE, + DEBUG_MSG_QUEUE_FULL_COUNT, + DEBUG_COMPLETION_QUEUE_FULL_COUNT, +#endif + DEBUG_MAX +}; + +#if VCHIQ_ENABLE_DEBUG + +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug +#define DEBUG_TRACE(d) \ + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0) +#define DEBUG_VALUE(d, v) \ + do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0) +#define DEBUG_COUNT(d) \ + do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0) + +#else /* VCHIQ_ENABLE_DEBUG */ + +#define DEBUG_INITIALISE(local) +#define DEBUG_TRACE(d) +#define DEBUG_VALUE(d, v) +#define DEBUG_COUNT(d) + +#endif /* VCHIQ_ENABLE_DEBUG */ + +enum vchiq_connstate { + VCHIQ_CONNSTATE_DISCONNECTED, + VCHIQ_CONNSTATE_CONNECTING, + 
VCHIQ_CONNSTATE_CONNECTED, + VCHIQ_CONNSTATE_PAUSING, + VCHIQ_CONNSTATE_PAUSE_SENT, + VCHIQ_CONNSTATE_PAUSED, + VCHIQ_CONNSTATE_RESUMING, + VCHIQ_CONNSTATE_PAUSE_TIMEOUT, + VCHIQ_CONNSTATE_RESUME_TIMEOUT +}; + +enum { + VCHIQ_SRVSTATE_FREE, + VCHIQ_SRVSTATE_HIDDEN, + VCHIQ_SRVSTATE_LISTENING, + VCHIQ_SRVSTATE_OPENING, + VCHIQ_SRVSTATE_OPEN, + VCHIQ_SRVSTATE_OPENSYNC, + VCHIQ_SRVSTATE_CLOSESENT, + VCHIQ_SRVSTATE_CLOSERECVD, + VCHIQ_SRVSTATE_CLOSEWAIT, + VCHIQ_SRVSTATE_CLOSED +}; + +enum vchiq_bulk_dir { + VCHIQ_BULK_TRANSMIT, + VCHIQ_BULK_RECEIVE +}; + +struct vchiq_bulk { + short mode; + short dir; + void *cb_data; + void __user *cb_userdata; + struct bulk_waiter *waiter; + dma_addr_t dma_addr; + int size; + void *remote_data; + int remote_size; + int actual; + void *offset; + void __user *uoffset; +}; + +struct vchiq_bulk_queue { + int local_insert; /* Where to insert the next local bulk */ + int remote_insert; /* Where to insert the next remote bulk (master) */ + int process; /* Bulk to transfer next */ + int remote_notify; /* Bulk to notify the remote client of next (mstr) */ + int remove; /* Bulk to notify the local client of, and remove, next */ + struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS]; +}; + +/* + * Remote events provide a way of presenting several virtual doorbells to a + * peer (ARM host to VPU) using only one physical doorbell. They can be thought + * of as a way for the peer to signal a semaphore, in this case implemented as + * a workqueue. + * + * Remote events remain signalled until acknowledged by the receiver, and they + * are non-counting. They are designed in such a way as to minimise the number + * of interrupts and avoid unnecessary waiting. + * + * A remote_event is as small data structures that live in shared memory. It + * comprises two booleans - armed and fired: + * + * The sender sets fired when they signal the receiver. + * If fired is set, the receiver has been signalled and need not wait. + * The receiver sets the armed field before they begin to wait. + * If armed is set, the receiver is waiting and wishes to be woken by interrupt. + */ +struct remote_event { + int armed; + int fired; + u32 __unused; +}; + +struct opaque_platform_state; + +struct vchiq_slot { + char data[VCHIQ_SLOT_SIZE]; +}; + +struct vchiq_slot_info { + /* Use two counters rather than one to avoid the need for a mutex. */ + short use_count; + short release_count; +}; + +/* + * VCHIQ is a reliable connection-oriented datagram protocol. + * + * A VCHIQ service is equivalent to a TCP connection, except: + * + FOURCCs are used for the rendezvous, and port numbers are assigned at the + * time the connection is established. + * + There is less of a distinction between server and client sockets, the only + * difference being which end makes the first move. + * + For a multi-client server, the server creates new "listening" services as + * the existing one becomes connected - there is no need to specify the + * maximum number of clients up front. + * + Data transfer is reliable but packetized (messages have defined ends). + * + Messages can be either short (capable of fitting in a slot) and in-band, + * or copied between external buffers (bulk transfers). 
+ */ +struct vchiq_service { + struct vchiq_service_base base; + unsigned int handle; + struct kref ref_count; + struct rcu_head rcu; + int srvstate; + void (*userdata_term)(void *userdata); + unsigned int localport; + unsigned int remoteport; + int public_fourcc; + int client_id; + char auto_close; + char sync; + char closing; + char trace; + atomic_t poll_flags; + short version; + short version_min; + short peer_version; + + struct vchiq_state *state; + struct vchiq_instance *instance; + + int service_use_count; + + struct vchiq_bulk_queue bulk_tx; + struct vchiq_bulk_queue bulk_rx; + + struct completion remove_event; + struct completion bulk_remove_event; + struct mutex bulk_mutex; + + struct service_stats_struct { + int quota_stalls; + int slot_stalls; + int bulk_stalls; + int error_count; + int ctrl_tx_count; + int ctrl_rx_count; + int bulk_tx_count; + int bulk_rx_count; + int bulk_aborted_count; + u64 ctrl_tx_bytes; + u64 ctrl_rx_bytes; + u64 bulk_tx_bytes; + u64 bulk_rx_bytes; + } stats; + + int msg_queue_read; + int msg_queue_write; + struct completion msg_queue_pop; + struct completion msg_queue_push; + struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS]; +}; + +/* + * The quota information is outside struct vchiq_service so that it can + * be statically allocated, since for accounting reasons a service's slot + * usage is carried over between users of the same port number. + */ +struct vchiq_service_quota { + unsigned short slot_quota; + unsigned short slot_use_count; + unsigned short message_quota; + unsigned short message_use_count; + struct completion quota_event; + int previous_tx_index; +}; + +struct vchiq_shared_state { + /* A non-zero value here indicates that the content is valid. */ + int initialised; + + /* The first and last (inclusive) slots allocated to the owner. */ + int slot_first; + int slot_last; + + /* The slot allocated to synchronous messages from the owner. */ + int slot_sync; + + /* + * Signalling this event indicates that owner's slot handler thread + * should run. + */ + struct remote_event trigger; + + /* + * Indicates the byte position within the stream where the next message + * will be written. The least significant bits are an index into the + * slot. The next bits are the index of the slot in slot_queue. + */ + int tx_pos; + + /* This event should be signalled when a slot is recycled. */ + struct remote_event recycle; + + /* The slot_queue index where the next recycled slot will be written. */ + int slot_queue_recycle; + + /* This event should be signalled when a synchronous message is sent. */ + struct remote_event sync_trigger; + + /* + * This event should be signalled when a synchronous message has been + * released. + */ + struct remote_event sync_release; + + /* A circular buffer of slot indexes. */ + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE]; + + /* Debugging state */ + int debug[DEBUG_MAX]; +}; + +/* + * vchiq_slot_zero describes the memory shared between the ARM host and the + * VideoCore VPU. The "master" and "slave" states are owned by the respective + * sides but visible to the other; the slots are shared, and the remaining + * fields are read-only. + * + * In the configuration used by this implementation, the memory is allocated + * by the host, the VPU is the master (the side which controls the DMA for bulk + * transfers), and the host is the slave. + * + * The ownership of slots changes with use: + * + When empty they are owned by the sender. + * + When partially filled they are shared with the receiver. 
+ * + When completely full they are owned by the receiver. + * + When the receiver has finished processing the contents, they are recycled + * back to the sender. + */ +struct vchiq_slot_zero { + int magic; + short version; + short version_min; + int slot_zero_size; + int slot_size; + int max_slots; + int max_slots_per_side; + int platform_data[2]; + struct vchiq_shared_state master; + struct vchiq_shared_state slave; + struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS]; +}; + +/* + * This is the private runtime state used by each side. The same structure was + * originally used by both sides, but implementations have since diverged. + */ +struct vchiq_state { + struct device *dev; + int id; + int initialised; + enum vchiq_connstate conn_state; + short version_common; + + struct vchiq_shared_state *local; + struct vchiq_shared_state *remote; + struct vchiq_slot *slot_data; + + unsigned short default_slot_quota; + unsigned short default_message_quota; + + /* Event indicating connect message received */ + struct completion connect; + + /* Mutex protecting services */ + struct mutex mutex; + struct vchiq_instance **instance; + + /* Processes all incoming messages which aren't synchronous */ + struct task_struct *slot_handler_thread; + + /* + * Slots which have been fully processed and released by the (peer) + * receiver are added to the receiver queue, which is asynchronously + * processed by the recycle thread. + */ + struct task_struct *recycle_thread; + + /* + * Processes incoming synchronous messages + * + * The synchronous message channel is shared between all synchronous + * services, and provides a way for urgent messages to bypass + * potentially long queues of asynchronous messages in the normal slots. + * + * There can be only one outstanding synchronous message in + * each direction, and as a precious shared resource synchronous + * services should be used sparingly. + */ + struct task_struct *sync_thread; + + /* Local implementation of the trigger remote event */ + wait_queue_head_t trigger_event; + + /* Local implementation of the recycle remote event */ + wait_queue_head_t recycle_event; + + /* Local implementation of the sync trigger remote event */ + wait_queue_head_t sync_trigger_event; + + /* Local implementation of the sync release remote event */ + wait_queue_head_t sync_release_event; + + char *tx_data; + char *rx_data; + struct vchiq_slot_info *rx_info; + + struct mutex slot_mutex; + + struct mutex recycle_mutex; + + struct mutex sync_mutex; + + spinlock_t msg_queue_spinlock; + + spinlock_t bulk_waiter_spinlock; + + spinlock_t quota_spinlock; + + /* + * Indicates the byte position within the stream from where the next + * message will be read. The least significant bits are an index into + * the slot.The next bits are the index of the slot in + * remote->slot_queue. + */ + int rx_pos; + + /* + * A cached copy of local->tx_pos. Only write to local->tx_pos, and read + * from remote->tx_pos. + */ + int local_tx_pos; + + /* The slot_queue index of the slot to become available next. */ + int slot_queue_available; + + /* A flag to indicate if any poll has been requested */ + int poll_needed; + + /* Ths index of the previous slot used for data messages. */ + int previous_data_index; + + /* The number of slots occupied by data messages. */ + unsigned short data_use_count; + + /* The maximum number of slots to be occupied by data messages. */ + unsigned short data_quota; + + /* An array of bit sets indicating which services must be polled. 
*/ + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)]; + + /* The number of the first unused service */ + int unused_service; + + /* Signalled when a free slot becomes available. */ + struct completion slot_available_event; + + /* Signalled when a free data slot becomes available. */ + struct completion data_quota_event; + + struct state_stats_struct { + int slot_stalls; + int data_stalls; + int ctrl_tx_count; + int ctrl_rx_count; + int error_count; + } stats; + + struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES]; + struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES]; + struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS]; + + struct opaque_platform_state *platform_state; +}; + +struct pagelist { + u32 length; + u16 type; + u16 offset; + u32 addrs[1]; /* N.B. 12 LSBs hold the number + * of following pages at consecutive + * addresses. + */ +}; + +struct vchiq_pagelist_info { + struct pagelist *pagelist; + size_t pagelist_buffer_size; + dma_addr_t dma_addr; + enum dma_data_direction dma_dir; + unsigned int num_pages; + unsigned int pages_need_release; + struct page **pages; + struct scatterlist *scatterlist; + unsigned int scatterlist_mapped; +}; + +static inline bool vchiq_remote_initialised(const struct vchiq_state *state) +{ + return state->remote && state->remote->initialised; +} + +struct bulk_waiter { + struct vchiq_bulk *bulk; + struct completion event; + int actual; +}; + +struct vchiq_config { + unsigned int max_msg_size; + unsigned int bulk_threshold; /* The message size above which it + * is better to use a bulk transfer + * (<= max_msg_size) + */ + unsigned int max_outstanding_bulks; + unsigned int max_services; + short version; /* The version of VCHIQ */ + short version_min; /* The minimum compatible version of VCHIQ */ +}; + +extern spinlock_t bulk_waiter_spinlock; + +extern const char * +get_conn_state_name(enum vchiq_connstate conn_state); + +extern struct vchiq_slot_zero * +vchiq_init_slots(struct device *dev, void *mem_base, int mem_size); + +extern int +vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev); + +extern int +vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance); + +struct vchiq_service * +vchiq_add_service_internal(struct vchiq_state *state, + const struct vchiq_service_params_kernel *params, + int srvstate, struct vchiq_instance *instance, + void (*userdata_term)(void *userdata)); + +extern int +vchiq_open_service_internal(struct vchiq_service *service, int client_id); + +extern int +vchiq_close_service_internal(struct vchiq_service *service, int close_recvd); + +extern void +vchiq_terminate_service_internal(struct vchiq_service *service); + +extern void +vchiq_free_service_internal(struct vchiq_service *service); + +extern void +vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance); + +extern void +remote_event_pollall(struct vchiq_state *state); + +extern int +vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, unsigned int handle, + struct bulk_waiter *userdata); + +extern int +vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk); + +extern int +vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk); + +extern void +vchiq_dump_state(struct seq_file *f, struct vchiq_state *state); + +extern void +request_poll(struct vchiq_state *state, struct vchiq_service *service, + int poll_type); + +struct vchiq_service 
*handle_to_service(struct vchiq_instance *instance, unsigned int handle); + +extern struct vchiq_service * +find_service_by_handle(struct vchiq_instance *instance, unsigned int handle); + +extern struct vchiq_service * +find_service_by_port(struct vchiq_state *state, unsigned int localport); + +extern struct vchiq_service * +find_service_for_instance(struct vchiq_instance *instance, unsigned int handle); + +extern struct vchiq_service * +find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle); + +extern struct vchiq_service * +__next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx); + +extern struct vchiq_service * +next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx); + +extern void +vchiq_service_get(struct vchiq_service *service); + +extern void +vchiq_service_put(struct vchiq_service *service); + +extern int +vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + size_t size); + +void vchiq_dump_platform_state(struct seq_file *f); + +void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f); + +void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service); + +int vchiq_use_service_internal(struct vchiq_service *service); + +int vchiq_release_service_internal(struct vchiq_service *service); + +void vchiq_on_remote_use(struct vchiq_state *state); + +void vchiq_on_remote_release(struct vchiq_state *state); + +int vchiq_platform_init_state(struct vchiq_state *state); + +int vchiq_check_service(struct vchiq_service *service); + +int vchiq_send_remote_use(struct vchiq_state *state); + +int vchiq_send_remote_use_active(struct vchiq_state *state); + +void vchiq_platform_conn_state_changed(struct vchiq_state *state, + enum vchiq_connstate oldstate, + enum vchiq_connstate newstate); + +void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate); + +void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr, + const void *void_mem, size_t num_bytes); + +int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service); + +int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service); + +void vchiq_get_config(struct vchiq_config *config); + +int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service, + enum vchiq_service_option option, int value); + +#endif diff --git a/include/linux/raspberrypi/vchiq_debugfs.h b/include/linux/raspberrypi/vchiq_debugfs.h new file mode 100644 index 000000000000..b29e6693c949 --- /dev/null +++ b/include/linux/raspberrypi/vchiq_debugfs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. 
*/ + +#ifndef VCHIQ_DEBUGFS_H +#define VCHIQ_DEBUGFS_H + +struct vchiq_state; +struct vchiq_instance; + +struct vchiq_debugfs_node { + struct dentry *dentry; +}; + +void vchiq_debugfs_init(struct vchiq_state *state); + +void vchiq_debugfs_deinit(void); + +void vchiq_debugfs_add_instance(struct vchiq_instance *instance); + +void vchiq_debugfs_remove_instance(struct vchiq_instance *instance); + +#endif /* VCHIQ_DEBUGFS_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 8d2ba3749866..4091e978aef2 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -43,8 +43,36 @@ extern void rb_erase(struct rb_node *, struct rb_root *); /* Find logical next and previous nodes in a tree */ extern struct rb_node *rb_next(const struct rb_node *); extern struct rb_node *rb_prev(const struct rb_node *); -extern struct rb_node *rb_first(const struct rb_root *); -extern struct rb_node *rb_last(const struct rb_root *); + +/* + * This function returns the first node (in sort order) of the tree. + */ +static inline struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) + n = n->rb_left; + return n; +} + +/* + * This function returns the last node (in sort order) of the tree. + */ +static inline struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) + n = n->rb_right; + return n; +} /* Postorder iteration - always visit the parent after its children */ extern struct rb_node *rb_first_postorder(const struct rb_root *); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 357df16ede32..46514cb1b9e0 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -27,31 +27,6 @@ struct device_node; struct of_phandle_args; /** - * struct reset_control_lookup - represents a single lookup entry - * - * @list: internal list of all reset lookup entries - * @provider: name of the reset controller device controlling this reset line - * @index: ID of the reset controller in the reset controller device - * @dev_id: name of the device associated with this reset line - * @con_id: name of the reset line (can be NULL) - */ -struct reset_control_lookup { - struct list_head list; - const char *provider; - unsigned int index; - const char *dev_id; - const char *con_id; -}; - -#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - } - -/** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls * @ops: a pointer to device specific struct reset_control_ops @@ -90,9 +65,6 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev); struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); - -void reset_controller_add_lookup(struct reset_control_lookup *lookup, - unsigned int num_entries); #else static inline int reset_controller_register(struct reset_controller_dev *rcdev) { @@ -108,11 +80,6 @@ static inline int devm_reset_controller_register(struct device *dev, { return 0; } - -static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup, - unsigned int num_entries) -{ -} #endif #endif diff --git a/include/linux/reset.h b/include/linux/reset.h index 840d75d172f6..44f9e3415f92 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h 
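For context on the rb_first()/rb_last() helpers above, now provided as static inlines: the usual caller pattern is an in-order walk such as the sketch below, where struct example_node, its key member, and example_dump() are hypothetical.

#include <linux/printk.h>
#include <linux/rbtree.h>

struct example_node {
	struct rb_node node;
	int key;
};

static void example_dump(const struct rb_root *root)
{
	struct rb_node *n;

	/* rb_first() finds the leftmost node; rb_next() walks in sort order */
	for (n = rb_first(root); n; n = rb_next(n))
		pr_info("key=%d\n", rb_entry(n, struct example_node, node)->key);
}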
@@ -2,6 +2,7 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ +#include <linux/bits.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/types.h> diff --git a/include/linux/rio.h b/include/linux/rio.h index 3c29f40f3c94..2c29f21ba9e5 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -78,7 +78,7 @@ #define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */ #define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */ -extern struct bus_type rio_bus_type; +extern const struct bus_type rio_bus_type; extern struct class rio_mport_class; struct rio_mport; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index a8a8661839b6..221123660e71 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -1224,7 +1224,7 @@ struct ss_tmp { spinlock_t *lock_irqsave; }; -static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst) +static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst) { if (sst->lock) spin_unlock(sst->lock); @@ -1252,7 +1252,7 @@ static inline void __scoped_seqlock_bug(void) { } extern void __scoped_seqlock_bug(void); #endif -static inline void +static __always_inline void __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target) { switch (sst->state) { diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 5e4b3c1ae5c2..e2069b3179c4 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -10,6 +10,7 @@ #include <linux/xattr.h> #include <linux/fs_parser.h> #include <linux/userfaultfd_k.h> +#include <linux/bits.h> struct swap_iocb; @@ -19,6 +20,19 @@ struct swap_iocb; #define SHMEM_MAXQUOTAS 2 #endif +/* Suppress pre-accounting of the entire object size. */ +#define SHMEM_F_NORESERVE BIT(0) +/* Disallow swapping. */ +#define SHMEM_F_LOCKED BIT(1) +/* + * Disallow growing, shrinking, or hole punching in the inode. Combined with + * folio pinning, makes sure the inode's mapping stays fixed. + * + * In some ways similar to F_SEAL_GROW | F_SEAL_SHRINK, but can be removed and + * isn't directly visible to userspace. + */ +#define SHMEM_F_MAPPING_FROZEN BIT(2) + struct shmem_inode_info { spinlock_t lock; unsigned int seals; /* shmem seals */ @@ -186,6 +200,15 @@ static inline bool shmem_file(struct file *file) return shmem_mapping(file->f_mapping); } +/* Must be called with inode lock taken exclusive. 
*/ +static inline void shmem_freeze(struct inode *inode, bool freeze) +{ + if (freeze) + SHMEM_I(inode)->flags |= SHMEM_F_MAPPING_FROZEN; + else + SHMEM_I(inode)->flags &= ~SHMEM_F_MAPPING_FROZEN; +} + /* * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages * beyond i_size's notion of EOF, which fallocate has committed to reserving: diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 7a69210a250c..0287f9182c4d 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -74,7 +74,14 @@ #define LLCC_CAMSRTIP 73 #define LLCC_CAMRTRF 74 #define LLCC_CAMSRTRF 75 +#define LLCC_VIDEO_APV 83 +#define LLCC_COMPUTE1 87 +#define LLCC_CPUSS_OPP 88 #define LLCC_CPUSSMPAM 89 +#define LLCC_CAM_IPE_STROV 92 +#define LLCC_CAM_OFE_STROV 93 +#define LLCC_CPUSS_HEU 94 +#define LLCC_MDM_PNG_FIXED 100 /** * struct llcc_slice_desc - Cache slice descriptor diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h index 608950443eee..ba823a0013c5 100644 --- a/include/linux/soc/qcom/socinfo.h +++ b/include/linux/soc/qcom/socinfo.h @@ -82,6 +82,10 @@ struct socinfo { __le32 num_func_clusters; __le32 boot_cluster; __le32 boot_core; + /* Version 20 */ + __le32 raw_package_type; + /* Version 21, 22, 23 */ + __le32 reserve1[4]; }; /* Internal feature codes */ diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h index 1ed8b1b16bc9..0a4edfe3d96d 100644 --- a/include/linux/soc/qcom/ubwc.h +++ b/include/linux/soc/qcom/ubwc.h @@ -52,6 +52,7 @@ struct qcom_ubwc_cfg_data { #define UBWC_4_0 0x40000000 #define UBWC_4_3 0x40030000 #define UBWC_5_0 0x50000000 +#define UBWC_6_0 0x60000000 #if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG) const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void); diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index 71e0c09a49eb..532c6c2d1195 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -672,14 +672,341 @@ /* For Tensor GS101 */ /* PMU ALIVE */ -#define GS101_SYSIP_DAT0 (0x810) -#define GS101_CPU0_INFORM (0x860) -#define GS101_CPU_INFORM(cpu) \ - (GS101_CPU0_INFORM + (cpu*4)) -#define GS101_SYSTEM_CONFIGURATION (0x3A00) -#define GS101_EINT_WAKEUP_MASK (0x3A80) -#define GS101_PHY_CTRL_USB20 (0x3EB0) -#define GS101_PHY_CTRL_USBDP (0x3EB4) +#define GS101_OM_STAT 0x0000 +#define GS101_VERSION 0x0004 +#define GS101_PORESET_CHECK 0x0008 +#define GS101_OTP_STATUS 0x000c +#define GS101_SYSTEM_INFO 0x0010 +#define GS101_IDLE_IP(n) (0x03e0 + ((n) & 3) * 4) +#define GS101_IDLE_IP_MASK(n) (0x03f0 + ((n) & 3) * 4) +#define GS101_SLC_CH_OFFSET(ch) (0x0400 + ((ch) & 3) * 0x10) +#define GS101_DATARAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x00) +#define GS101_TAGRAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x04) +#define GS101_LRURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x08) +#define GS101_PPMPURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x0c) +#define GS101_DATARAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x40) +#define GS101_TAGRAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x44) +#define GS101_LRURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x48) +#define GS101_PPMPURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x4c) +#define GS101_INFORM0 0x0800 +#define GS101_INFORM1 0x0804 +#define GS101_INFORM2 0x0808 +#define GS101_INFORM3 0x080c +#define GS101_SYSIP_DAT(n) (0x0810 + ((n) & 3) * 4) +#define GS101_PWR_HOLD_HW_TRIP 0x0820 +#define 
GS101_PWR_HOLD_SW_TRIP 0x0824 +#define GS101_GSA_INFORM(n) (0x0830 + ((n) & 1) * 4) +#define GS101_INFORM4 0x0840 +#define GS101_INFORM5 0x0844 +#define GS101_INFORM6 0x0848 +#define GS101_INFORM7 0x084c +#define GS101_INFORM8 0x0850 +#define GS101_INFORM9 0x0854 +#define GS101_INFORM10 0x0858 +#define GS101_INFORM11 0x085c +#define GS101_CPU_INFORM(cpu) (0x0860 + ((cpu) & 7) * 4) +#define GS101_IROM_INFORM 0x0880 +#define GS101_IROM_CPU_INFORM(cpu) (0x0890 + ((cpu) & 7) * 4) +#define GS101_PMU_SPARE(n) (0x0900 + ((n) & 3) * 4) +#define GS101_IROM_DATA_REG(n) (0x0980 + ((n) & 3) * 4) +#define GS101_IROM_PWRMODE 0x0990 +#define GS101_DREX_CALIBRATION(n) (0x09a0 + ((n) & 7) * 4) + +#define GS101_CLUSTER0_OFFSET 0x1000 +#define GS101_CLUSTER1_OFFSET 0x1300 +#define GS101_CLUSTER2_OFFSET 0x1500 +#define GS101_CLUSTER_CPU_OFFSET(cl, cpu) ((cl) + ((cpu) * 0x80)) +#define GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x00) +#define GS101_CLUSTER_CPU_STATUS(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x04) +#define GS101_CLUSTER_CPU_STATES(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x08) +#define GS101_CLUSTER_CPU_OPTION(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x0c) +#define GS101_CLUSTER_CPU_OUT(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x20) +#define GS101_CLUSTER_CPU_IN(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x24) +#define GS101_CLUSTER_CPU_INT_IN(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x40) +#define GS101_CLUSTER_CPU_INT_EN(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x44) +#define GS101_CLUSTER_CPU_INT_TYPE(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x48) +#define GS101_CLUSTER_CPU_INT_DIR(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x4c) + +#define GS101_CLUSTER_NONCPU_OFFSET(cl) (0x1200 + ((cl) * 0x200)) +#define GS101_CLUSTER_NONCPU_CONFIGURATION(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x00) +#define GS101_CLUSTER_NONCPU_STATUS(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x04) +#define GS101_CLUSTER_NONCPU_STATES(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x08) +#define GS101_CLUSTER_NONCPU_OPTION(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x0c) +#define GS101_CLUSTER_NONCPU_OUT(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x20) +#define GS101_CLUSTER_NONCPU_IN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x24) +#define GS101_CLUSTER_NONCPU_INT_IN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x40) +#define GS101_CLUSTER_NONCPU_INT_EN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x44) +#define GS101_CLUSTER_NONCPU_INT_TYPE(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x48) +#define GS101_CLUSTER_NONCPU_INT_DIR(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x4c) +#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x60) +#define GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x64) +#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x6c) +#define GS101_CLUSTER0_NONCPU_DSU_PCH \ + (GS101_CLUSTER_NONCPU_OFFSET(0) + 0x80) + +#define GS101_SUBBBLK_OFFSET_ALIVE 0x1800 +#define GS101_SUBBBLK_OFFSET_AOC 0x1880 +#define GS101_SUBBBLK_OFFSET_APM 0x1900 +#define GS101_SUBBBLK_OFFSET_CMU 0x1980 +#define GS101_SUBBBLK_OFFSET_BUS0 0x1a00 +#define GS101_SUBBBLK_OFFSET_BUS1 0x1a80 +#define GS101_SUBBBLK_OFFSET_BUS2 0x1b00 +#define GS101_SUBBBLK_OFFSET_CORE 0x1b80 +#define GS101_SUBBBLK_OFFSET_EH 0x1c00 +#define GS101_SUBBBLK_OFFSET_CPUCL0 0x1c80 +#define GS101_SUBBBLK_OFFSET_CPUCL1 0x1d00 +#define 
GS101_SUBBBLK_OFFSET_CPUCL2 0x1d80 +#define GS101_SUBBBLK_OFFSET_G3D 0x1e00 +#define GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0 0x1e80 +#define GS101_SUBBBLK_OFFSET_EMBEDDED_G3D 0x2000 +#define GS101_SUBBBLK_OFFSET_HSI0 0x2080 +#define GS101_SUBBBLK_OFFSET_HSI1 0x2100 +#define GS101_SUBBBLK_OFFSET_HSI2 0x2180 +#define GS101_SUBBBLK_OFFSET_DPU 0x2200 +#define GS101_SUBBBLK_OFFSET_DISP 0x2280 +#define GS101_SUBBBLK_OFFSET_G2D 0x2300 +#define GS101_SUBBBLK_OFFSET_MFC 0x2380 +#define GS101_SUBBBLK_OFFSET_CSIS 0x2400 +#define GS101_SUBBBLK_OFFSET_PDP 0x2480 +#define GS101_SUBBBLK_OFFSET_DNS 0x2500 +#define GS101_SUBBBLK_OFFSET_G3AA 0x2580 +#define GS101_SUBBBLK_OFFSET_IPP 0x2600 +#define GS101_SUBBBLK_OFFSET_ITP 0x2680 +#define GS101_SUBBBLK_OFFSET_MCSC 0x2700 +#define GS101_SUBBBLK_OFFSET_GDC 0x2780 +#define GS101_SUBBBLK_OFFSET_TNR 0x2800 +#define GS101_SUBBBLK_OFFSET_BO 0x2880 +#define GS101_SUBBBLK_OFFSET_TPU 0x2900 +#define GS101_SUBBBLK_OFFSET_MIF0 0x2980 +#define GS101_SUBBBLK_OFFSET_MIF1 0x2a00 +#define GS101_SUBBBLK_OFFSET_MIF2 0x2a80 +#define GS101_SUBBBLK_OFFSET_MIF3 0x2b00 +#define GS101_SUBBBLK_OFFSET_MISC 0x2b80 +#define GS101_SUBBBLK_OFFSET_PERIC0 0x2c00 +#define GS101_SUBBBLK_OFFSET_PERIC1 0x2c80 +#define GS101_SUBBBLK_OFFSET_S2D 0x2d00 +#define GS101_SUBBLK_CONFIGURATION(blk) ((blk) + 0x00) +#define GS101_SUBBLK_STATUS(blk) ((blk) + 0x04) +#define GS101_SUBBLK_STATES(blk) ((blk) + 0x08) +#define GS101_SUBBLK_OPTION(blk) ((blk) + 0x0c) +#define GS101_SUBBLK_CTRL(blk) ((blk) + 0x10) +#define GS101_SUBBLK_OUT(blk) ((blk) + 0x20) +#define GS101_SUBBLK_IN(blk) ((blk) + 0x24) +#define GS101_SUBBLK_INT_IN(blk) ((blk) + 0x40) +#define GS101_SUBBLK_INT_EN(blk) ((blk) + 0x44) +#define GS101_SUBBLK_INT_TYPE(blk) ((blk) + 0x48) +#define GS101_SUBBLK_INT_DIR(blk) ((blk) + 0x4c) +#define GS101_SUBBLK_MEMORY_OUT(blk) ((blk) + 0x60) +#define GS101_SUBBLK_MEMORY_IN(blk) ((blk) + 0x64) + +#define GS101_SUBBBLK_CPU_OFFSET_APM 0x3000 +#define GS101_SUBBBLK_CPU_OFFSET_DBGCORE 0x3080 +#define GS101_SUBBBLK_CPU_OFFSET_SSS 0x3100 +#define GS101_SUBBLK_CPU_CONFIGURATION(blk) ((blk) + 0x00) +#define GS101_SUBBLK_CPU_STATUS(blk) ((blk) + 0x04) +#define GS101_SUBBLK_CPU_STATES(blk) ((blk) + 0x08) +#define GS101_SUBBLK_CPU_OPTION(blk) ((blk) + 0x0c) +#define GS101_SUBBLK_CPU_OUT(blk) ((blk) + 0x20) +#define GS101_SUBBLK_CPU_IN(blk) ((blk) + 0x24) +#define GS101_SUBBLK_CPU_INT_IN(blk) ((blk) + 0x40) +#define GS101_SUBBLK_CPU_INT_EN(blk) ((blk) + 0x44) +#define GS101_SUBBLK_CPU_INT_TYPE(blk) ((blk) + 0x48) +#define GS101_SUBBLK_CPU_INT_DIR(blk) ((blk) + 0x4c) + +#define GS101_MIF_CONFIGURATION 0x3800 +#define GS101_MIF_STATUS 0x3804 +#define GS101_MIF_STATES 0x3808 +#define GS101_MIF_OPTION 0x380c +#define GS101_MIF_CTRL 0x3810 +#define GS101_MIF_OUT 0x3820 +#define GS101_MIF_IN 0x3824 +#define GS101_MIF_INT_IN 0x3840 +#define GS101_MIF_INT_EN 0x3844 +#define GS101_MIF_INT_TYPE 0x3848 +#define GS101_MIF_INT_DIR 0x384c +#define GS101_TOP_CONFIGURATION 0x3900 +#define GS101_TOP_STATUS 0x3904 +#define GS101_TOP_STATES 0x3908 +#define GS101_TOP_OPTION 0x390c +#define GS101_TOP_OUT 0x3920 +#define GS101_TOP_IN 0x3924 +#define GS101_TOP_INT_IN 0x3940 +#define GS101_TOP_INT_EN 0x3944 +#define GS101_TOP_INT_TYPE 0x3948 +#define GS101_TOP_INT_DIR 0x394c +#define GS101_WAKEUP_STAT 0x3950 +#define GS101_WAKEUP2_STAT 0x3954 +#define GS101_WAKEUP2_INT_IN 0x3960 +#define GS101_WAKEUP2_INT_EN 0x3964 +#define GS101_WAKEUP2_INT_TYPE 0x3968 +#define GS101_WAKEUP2_INT_DIR 0x396c +#define GS101_SYSTEM_CONFIGURATION 0x3a00 +#define 
GS101_SYSTEM_STATUS 0x3a04 +#define GS101_SYSTEM_STATES 0x3a08 +#define GS101_SYSTEM_OPTION 0x3a0c +#define GS101_SYSTEM_CTRL 0x3a10 +#define GS101_SPARE_CTRL 0x3a14 +#define GS101_USER_DEFINED_OUT 0x3a18 +#define GS101_SYSTEM_OUT 0x3a20 +#define GS101_SYSTEM_IN 0x3a24 +#define GS101_SYSTEM_INT_IN 0x3a40 +#define GS101_SYSTEM_INT_EN 0x3a44 +#define GS101_SYSTEM_INT_TYPE 0x3a48 +#define GS101_SYSTEM_INT_DIR 0x3a4c +#define GS101_EINT_INT_IN 0x3a50 +#define GS101_EINT_INT_EN 0x3a54 +#define GS101_EINT_INT_TYPE 0x3a58 +#define GS101_EINT_INT_DIR 0x3a5c +#define GS101_EINT2_INT_IN 0x3a60 +#define GS101_EINT2_INT_EN 0x3a64 +#define GS101_EINT2_INT_TYPE 0x3a68 +#define GS101_EINT2_INT_DIR 0x3a6c +#define GS101_EINT3_INT_IN 0x3a70 +#define GS101_EINT3_INT_EN 0x3a74 +#define GS101_EINT3_INT_TYPE 0x3a78 +#define GS101_EINT3_INT_DIR 0x3a7c +#define GS101_EINT_WAKEUP_MASK 0x3a80 +#define GS101_EINT_WAKEUP_MASK2 0x3a84 +#define GS101_EINT_WAKEUP_MASK3 0x3a88 +#define GS101_USER_DEFINED_INT_IN 0x3a90 +#define GS101_USER_DEFINED_INT_EN 0x3a94 +#define GS101_USER_DEFINED_INT_TYPE 0x3a98 +#define GS101_USER_DEFINED_INT_DIR 0x3a9c +#define GS101_SCAN2DRAM_INT_IN 0x3aa0 +#define GS101_SCAN2DRAM_INT_EN 0x3aa4 +#define GS101_SCAN2DRAM_INT_TYPE 0x3aa8 +#define GS101_SCAN2DRAM_INT_DIR 0x3aac +#define GS101_HCU_START 0x3ab0 +#define GS101_CUSTOM_OUT 0x3ac0 +#define GS101_CUSTOM_IN 0x3ac4 +#define GS101_CUSTOM_INT_IN 0x3ad0 +#define GS101_CUSTOM_INT_EN 0x3ad4 +#define GS101_CUSTOM_INT_TYPE 0x3ad8 +#define GS101_CUSTOM_INT_DIR 0x3adc +#define GS101_ACK_LAST_CPU 0x3afc +#define GS101_HCU_R(n) (0x3b00 + ((n) & 3) * 4) +#define GS101_HCU_SP 0x3b14 +#define GS101_HCU_PC 0x3b18 +#define GS101_PMU_RAM_CTRL 0x3b20 +#define GS101_APM_HCU_CTRL 0x3b24 +#define GS101_APM_NMI_ENABLE 0x3b30 +#define GS101_DBGCORE_NMI_ENABLE 0x3b34 +#define GS101_HCU_NMI_ENABLE 0x3b38 +#define GS101_PWR_HOLD_WDT_ENABLE 0x3b3c +#define GS101_NMI_SRC_IN 0x3b40 +#define GS101_RST_STAT 0x3b44 +#define GS101_RST_STAT_PMU 0x3b48 +#define GS101_HPM_INT_IN 0x3b60 +#define GS101_HPM_INT_EN 0x3b64 +#define GS101_HPM_INT_TYPE 0x3b68 +#define GS101_HPM_INT_DIR 0x3b6c +#define GS101_S2D_AUTH 0x3b70 +#define GS101_BOOT_STAT 0x3b74 +#define GS101_PMLINK_OUT 0x3c00 +#define GS101_PMLINK_AOC_OUT 0x3c04 +#define GS101_PMLINK_AOC_CTRL 0x3c08 +#define GS101_TCXO_BUF_CTRL 0x3c10 +#define GS101_ADD_CTRL 0x3c14 +#define GS101_HCU_TIMEOUT_RESET 0x3c20 +#define GS101_HCU_TIMEOUT_SCAN2DRAM 0x3c24 +#define GS101_TIMER(n) (0x3c80 + ((n) & 3) * 4) +#define GS101_PPC_MIF(n) (0x3c90 + ((n) & 3) * 4) +#define GS101_PPC_CORE 0x3ca0 +#define GS101_PPC_EH 0x3ca4 +#define GS101_PPC_CPUCL1_0 0x3ca8 +#define GS101_PPC_CPUCL1_1 0x3cac +#define GS101_EXT_REGULATOR_MIF_DURATION 0x3cb0 +#define GS101_EXT_REGULATOR_TOP_DURATION 0x3cb4 +#define GS101_EXT_REGULATOR_CPUCL2_DURATION 0x3cb8 +#define GS101_EXT_REGULATOR_CPUCL1_DURATION 0x3cbc +#define GS101_EXT_REGULATOR_G3D_DURATION 0x3cc0 +#define GS101_EXT_REGULATOR_TPU_DURATION 0x3cc4 +#define GS101_TCXO_DURATION 0x3cc8 +#define GS101_BURNIN_CTRL 0x3cd0 +#define GS101_JTAG_DBG_DET 0x3cd4 +#define GS101_MMC_CONWKUP_CTRL 0x3cd8 +#define GS101_USBDPPHY0_USBDP_WAKEUP 0x3cdc +#define GS101_TMU_TOP_TRIP 0x3ce0 +#define GS101_TMU_SUB_TRIP 0x3ce4 +#define GS101_MEMORY_CEN 0x3d00 +#define GS101_MEMORY_PGEN 0x3d04 +#define GS101_MEMORY_RET 0x3d08 +#define GS101_MEMORY_PGEN_FEEDBACK 0x3d0c +#define GS101_MEMORY_SMX 0x3d10 +#define GS101_MEMORY_SMX_FEEDBACK 0x3d14 +#define GS101_SLC_PCH_CHANNEL 0x3d20 +#define GS101_SLC_PCH_CB 0x3d24 +#define 
+#define GS101_FORCE_NOMC	0x3d3c
+#define GS101_FORCE_BOOST	0x3d4c
+#define GS101_PMLINK_SLC_REQ	0x3d50
+#define GS101_PMLINK_SLC_ACK	0x3d54
+#define GS101_PMLINK_SLC_BUSY	0x3d58
+#define GS101_BOOTSYNC_OUT	0x3d80
+#define GS101_BOOTSYNC_IN	0x3d84
+#define GS101_SCAN_READY_OUT	0x3d88
+#define GS101_SCAN_READY_IN	0x3d8c
+#define GS101_GSA_RESTORE	0x3d90
+#define GS101_ALIVE_OTP_LATCH	0x3d94
+#define GS101_DEBUG_OVERRIDE	0x3d98
+#define GS101_WDT_OPTION	0x3d9c
+#define GS101_AOC_WDT_CFG	0x3da0
+#define GS101_CTRL_SECJTAG_ALIVE	0x3da4
+#define GS101_CTRL_DIV_PLL_ALV_DIVLOW	0x3e00
+#define GS101_CTRL_MUX_CLK_APM_REFSRC_AUTORESTORE	0x3e04
+#define GS101_CTRL_MUX_CLK_APM_REFSRC	0x3e08
+#define GS101_CTRL_MUX_CLK_APM_REF	0x3e0c
+#define GS101_CTRL_MUX_PLL_ALV_DIV4	0x3e10
+#define GS101_CTRL_PLL_ALV_DIV4	0x3e14
+#define GS101_CTRL_OSCCLK_APMGSA	0x3e18
+#define GS101_CTRL_BLK_AOC_CLKS	0x3e1c
+#define GS101_CTRL_PLL_ALV_LOCK	0x3e20
+#define GS101_CTRL_CLKDIV__CLKRTC	0x3e24
+#define GS101_CTRL_SOC32K	0x3e30
+#define GS101_CTRL_STM_PMU	0x3e34
+#define GS101_CTRL_PMU_DEBUG	0x3e38
+#define GS101_CTRL_DEBUG_UART	0x3e3c
+#define GS101_CTRL_TCK	0x3e40
+#define GS101_CTRL_SBU_SW_EN	0x3e44
+#define GS101_PAD_CTRL_CLKOUT0	0x3e80
+#define GS101_PAD_CTRL_CLKOUT1	0x3e84
+#define GS101_PAD_CTRL_APM_24MOUT_0	0x3e88
+#define GS101_PAD_CTRL_APM_24MOUT_1	0x3e8c
+#define GS101_PAD_CTRL_IO_FORCE_RETENTION	0x3e90
+#define GS101_PAD_CTRL_APACTIVE_n	0x3e94
+#define GS101_PAD_CTRL_TCXO_ON	0x3e98
+#define GS101_PAD_CTRL_PWR_HOLD	0x3e9c
+#define GS101_PAD_CTRL_RESETO_n	0x3ea0
+#define GS101_PAD_CTRL_WRESETO_n	0x3ea4
+#define GS101_PHY_CTRL_USB20	0x3eb0
+#define GS101_PHY_CTRL_USBDP	0x3eb4
+#define GS101_PHY_CTRL_MIPI_DCPHY_M4M4	0x3eb8
+#define GS101_PHY_CTRL_MIPI_DCPHY_S4S4S4S4	0x3ebc
+#define GS101_PHY_CTRL_PCIE_GEN4_0	0x3ec0
+#define GS101_PHY_CTRL_PCIE_GEN4_1	0x3ec4
+#define GS101_PHY_CTRL_UFS	0x3ec8
 /* PMU INTR GEN */
 #define GS101_GRP1_INTR_BID_UPEND	(0x0108)
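The per-block macros above compose an absolute PMU register offset from a sub-block base. A minimal sketch of that composition, assuming the PMU block is reached through a regmap handle obtained elsewhere; the regmap, the helper name and the meaning of bit 0 in STATUS are illustrative, not part of this header:

#include <linux/bits.h>
#include <linux/regmap.h>

/*
 * Illustrative only: poll a sub-block STATUS register.
 * e.g. blk == GS101_SUBBBLK_OFFSET_HSI0 resolves to 0x2080 + 0x04 = 0x2084.
 */
static int gs101_subblk_power_status(struct regmap *pmu, unsigned int blk)
{
	unsigned int status;
	int ret;

	ret = regmap_read(pmu, GS101_SUBBLK_STATUS(blk), &status);
	if (ret)
		return ret;

	return !!(status & BIT(0));	/* power-up bit position is assumed */
}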
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 22704c2e5b9b..57f4fd94166a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -131,7 +131,7 @@ static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
  */
 enum {
	RPCRDMA_LISTEN_BACKLOG = 10,
-	RPCRDMA_MAX_REQUESTS = 64,
+	RPCRDMA_MAX_REQUESTS = 128,
	RPCRDMA_MAX_BC_REQUESTS = 2,
 };
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 963bbe251e52..de37069aba90 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -26,6 +26,9 @@ struct svc_sock {
	void (*sk_odata)(struct sock *);
	void (*sk_owspace)(struct sock *);
+	/* For sends (protected by xpt_mutex) */
+	struct bio_vec *sk_bvec;
+
	/* private TCP part */
	/* On-the-wire fragment header: */
	__be32 sk_marker;
diff --git a/include/linux/sys_info.h b/include/linux/sys_info.h
index 89d77dc4f2ed..a5bc3ea3d44b 100644
--- a/include/linux/sys_info.h
+++ b/include/linux/sys_info.h
@@ -14,7 +14,7 @@
 #define SYS_INFO_LOCKS	0x00000008
 #define SYS_INFO_FTRACE	0x00000010
 #define SYS_INFO_PANIC_CONSOLE_REPLAY	0x00000020
-#define SYS_INFO_ALL_CPU_BT	0x00000040
+#define SYS_INFO_ALL_BT	0x00000040
 #define SYS_INFO_BLOCKED_TASKS	0x00000080
 void sys_info(unsigned long si_mask);
diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
index ae4d48e4c970..ac6d71be5c38 100644
--- a/include/linux/syscore_ops.h
+++ b/include/linux/syscore_ops.h
@@ -11,14 +11,19 @@
 #include <linux/list.h>
 struct syscore_ops {
+	int (*suspend)(void *data);
+	void (*resume)(void *data);
+	void (*shutdown)(void *data);
+};
+
+struct syscore {
	struct list_head node;
-	int (*suspend)(void);
-	void (*resume)(void);
-	void (*shutdown)(void);
+	const struct syscore_ops *ops;
+	void *data;
 };
-extern void register_syscore_ops(struct syscore_ops *ops);
-extern void unregister_syscore_ops(struct syscore_ops *ops);
+extern void register_syscore(struct syscore *syscore);
+extern void unregister_syscore(struct syscore *syscore);
 #ifdef CONFIG_PM_SLEEP
 extern int syscore_suspend(void);
 extern void syscore_resume(void);
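A minimal sketch of how a user of the reworked syscore interface above might look, now that the callbacks take a driver-supplied data pointer and registration goes through a struct syscore wrapper; the context structure and callback bodies are invented for illustration:

#include <linux/syscore_ops.h>

struct my_pm_ctx {
	u32 saved_ctrl;			/* hypothetical register backup */
};

static int my_syscore_suspend(void *data)
{
	struct my_pm_ctx *ctx = data;

	ctx->saved_ctrl = 0;		/* read and stash hardware state here */
	return 0;
}

static void my_syscore_resume(void *data)
{
	struct my_pm_ctx *ctx = data;

	(void)ctx->saved_ctrl;		/* write the saved state back here */
}

static const struct syscore_ops my_syscore_ops = {
	.suspend = my_syscore_suspend,
	.resume = my_syscore_resume,
};

static struct my_pm_ctx my_pm_ctx;

static struct syscore my_syscore = {
	.ops = &my_syscore_ops,
	.data = &my_pm_ctx,
};

/* in the driver's init path: register_syscore(&my_syscore); */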
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9a25a2911652..c33a96b7391a 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -58,6 +58,12 @@ do { \
 #define sysfs_attr_init(attr) do {} while (0)
 #endif
+#ifdef CONFIG_CFI
+#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) struct { MEMBERS }
+#else
+#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) union { MEMBERS }
+#endif
+
 /**
  * struct attribute_group - data structure used to declare an attribute group.
  * @name: Optional: Attribute group name
@@ -98,14 +104,21 @@ do { \
  */
 struct attribute_group {
	const char *name;
-	umode_t (*is_visible)(struct kobject *,
-			      struct attribute *, int);
+	__SYSFS_FUNCTION_ALTERNATIVE(
+		umode_t (*is_visible)(struct kobject *,
+				      struct attribute *, int);
+		umode_t (*is_visible_const)(struct kobject *,
+					    const struct attribute *, int);
+	);
	umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int);
	size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int);
-	struct attribute **attrs;
+	union {
+		struct attribute **attrs;
+		const struct attribute *const *attrs_const;
+	};
	const struct bin_attribute *const *bin_attrs;
 };
@@ -238,28 +251,20 @@ struct attribute_group {
	.store = _store, \
 }
-#define __ATTR_RO(_name) { \
-	.attr = { .name = __stringify(_name), .mode = 0444 }, \
-	.show = _name##_show, \
-}
-
 #define __ATTR_RO_MODE(_name, _mode) { \
	.attr = { .name = __stringify(_name), \
		  .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
	.show = _name##_show, \
 }
-#define __ATTR_RW_MODE(_name, _mode) { \
-	.attr = { .name = __stringify(_name), \
-		  .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
-	.show = _name##_show, \
-	.store = _name##_store, \
-}
+#define __ATTR_RO(_name) \
+	__ATTR_RO_MODE(_name, 0444)
-#define __ATTR_WO(_name) { \
-	.attr = { .name = __stringify(_name), .mode = 0200 }, \
-	.store = _name##_store, \
-}
+#define __ATTR_RW_MODE(_name, _mode) \
+	__ATTR(_name, _mode, _name##_show, _name##_store)
+
+#define __ATTR_WO(_name) \
+	__ATTR(_name, 0200, NULL, _name##_store)
 #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
@@ -284,7 +289,12 @@ static const struct attribute_group *_name##_groups[] = { \
 #define ATTRIBUTE_GROUPS(_name) \
 static const struct attribute_group _name##_group = { \
-	.attrs = _name##_attrs, \
+	.attrs = _Generic(_name##_attrs, \
+			  struct attribute **: \
+				_name##_attrs, \
+			  const struct attribute *const *: \
+				(void *)_name##_attrs \
+	), \
 }; \
 __ATTRIBUTE_GROUPS(_name)
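With the attrs/attrs_const union and the _Generic() dispatch in ATTRIBUTE_GROUPS() above, an attribute pointer array can itself be const. A hedged sketch of what that might look like for a simple kobject attribute; names are made up, and whether a given subsystem accepts fully const attributes still depends on the rest of this series:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "example\n");
}

static const struct kobj_attribute foo_attr = __ATTR_RO(foo);

/* The union member added above lets the pointer array itself be const. */
static const struct attribute *const foo_attrs[] = {
	&foo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);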
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 3d8f7d1ce2b8..202da079d500 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -529,41 +529,18 @@ static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip)
 #endif
 }
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
-			 u32 handle, u8 *name);
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+			u32 handle, u8 *name);
 void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, u8 attributes, u8 *passphrase, int passphraselen);
 void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
-			 u8 attributes, u8 *passphrase, int passphraselen);
-static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
-						   struct tpm_buf *buf,
-						   u8 attributes,
-						   u8 *passphrase,
-						   int passphraselen)
-{
-	struct tpm_header *head;
-	int offset;
-
-	if (tpm2_chip_auth(chip)) {
-		tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen);
-	} else {
-		offset = buf->handles * 4 + TPM_HEADER_SIZE;
-		head = (struct tpm_header *)buf->data;
-
-		/*
-		 * If the only sessions are optional, the command tag must change to
-		 * TPM2_ST_NO_SESSIONS.
-		 */
-		if (tpm_buf_length(buf) == offset)
-			head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
-	}
-}
+			 u8 *passphrase, int passphraselen);
 #ifdef CONFIG_TCG_TPM2_HMAC
 int tpm2_start_auth_session(struct tpm_chip *chip);
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
 int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, int rc);
 void tpm2_end_auth_session(struct tpm_chip *chip);
@@ -577,10 +554,13 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip)
 static inline void tpm2_end_auth_session(struct tpm_chip *chip)
 {
 }
-static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip,
-					     struct tpm_buf *buf)
+
+static inline int tpm_buf_fill_hmac_session(struct tpm_chip *chip,
+					    struct tpm_buf *buf)
 {
+	return 0;
 }
+
 static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, int rc)
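Since tpm_buf_append_name() and tpm_buf_fill_hmac_session() now return int, callers are expected to check them. A sketch of the resulting calling pattern, assuming the usual tpm_buf_init()/tpm_transmit_cmd()/tpm_buf_destroy() helpers are available in the caller's context; the surrounding function, handle and response length are invented:

/* Illustrative only; error paths may also need tpm2_end_auth_session(). */
static int my_tpm2_unseal(struct tpm_chip *chip, u32 blob_handle)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm2_start_auth_session(chip);
	if (rc)
		return rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
	if (rc)
		return rc;

	rc = tpm_buf_append_name(chip, &buf, blob_handle, NULL);
	if (rc)
		goto out;

	tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_CONTINUE_SESSION, NULL, 0);

	rc = tpm_buf_fill_hmac_session(chip, &buf);
	if (rc)
		goto out;

	rc = tpm_transmit_cmd(chip, &buf, 4, "unsealing");	/* assumed helper */
	rc = tpm_buf_check_hmac_response(chip, &buf, rc);
out:
	tpm_buf_destroy(&buf);
	return rc;
}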
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 04307a19cde3..3690221ba3d8 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -138,6 +138,7 @@ enum trace_iter_flags {
	TRACE_FILE_LAT_FMT = 1,
	TRACE_FILE_ANNOTATE = 2,
	TRACE_FILE_TIME_IN_NS = 4,
+	TRACE_FILE_PAUSE = 8,
 };
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index 431054810dca..a3b7ab668eff 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -5,6 +5,7 @@
 #include <linux/sizes.h>
 #include <linux/types.h>
 #include <linux/uuid.h>
+#include <linux/device.h>
 #define TSM_REPORT_INBLOB_MAX 64
 #define TSM_REPORT_OUTBLOB_MAX SZ_32K
@@ -107,6 +108,22 @@ struct tsm_report_ops {
	bool (*report_bin_attr_visible)(int n);
 };
+struct pci_tsm_ops;
+struct tsm_dev {
+	struct device dev;
+	int id;
+	const struct pci_tsm_ops *pci_ops;
+};
+
+DEFINE_FREE(put_tsm_dev, struct tsm_dev *,
+	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+
 int tsm_report_register(const struct tsm_report_ops *ops, void *priv);
 int tsm_report_unregister(const struct tsm_report_ops *ops);
+struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops);
+void tsm_unregister(struct tsm_dev *tsm_dev);
+struct tsm_dev *find_tsm_dev(int id);
+struct pci_ide;
+int tsm_ide_stream_register(struct pci_ide *ide);
+void tsm_ide_stream_unregister(struct pci_ide *ide);
 #endif /* __TSM_H */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index be395f5f7ee3..1f3804245c06 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -161,8 +161,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 * directly in the normal copy_to/from_user(), the other ones go
 * through an extern _copy_to/from_user(), which expands the same code
 * here.
- *
- * Rust code always uses the extern definition.
 */
 static inline __must_check unsigned long _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -192,8 +190,10 @@ fail:
	memset(to + (n - res), 0, res);
	return res;
 }
+#ifndef INLINE_COPY_FROM_USER
 extern __must_check unsigned long _copy_from_user(void *, const void __user *, unsigned long);
+#endif
 static inline __must_check unsigned long _inline_copy_to_user(void __user *to, const void *from, unsigned long n)
@@ -207,8 +207,10 @@ _inline_copy_to_user(void __user *to, const void *from, unsigned long n)
	}
	return n;
 }
+#ifndef INLINE_COPY_TO_USER
 extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long);
+#endif
 static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 3068c3084eb6..6ccd1b2af993 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -6,6 +6,7 @@
 #ifndef __LINUX_USB_PD_H
 #define __LINUX_USB_PD_H
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/usb/typec.h>
@@ -271,9 +272,11 @@ enum pd_pdo_type {
 enum pd_apdo_type {
	APDO_TYPE_PPS = 0,
+	APDO_TYPE_EPR_AVS = 1,
+	APDO_TYPE_SPR_AVS = 2,
 };
-#define PDO_APDO_TYPE_SHIFT	28	/* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_SHIFT	28
 #define PDO_APDO_TYPE_MASK	0x3
 #define PDO_APDO_TYPE(t)	((t) << PDO_APDO_TYPE_SHIFT)
@@ -297,6 +300,35 @@ enum pd_apdo_type {
	PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
	PDO_PPS_APDO_MAX_CURR(max_ma))
+/*
+ * Applicable only to EPR AVS APDO source cap as per
+ * Table 6.15 EPR Adjustable Voltage Supply APDO – Source
+ */
+#define PDO_EPR_AVS_APDO_PEAK_CURRENT	GENMASK(27, 26)
+
+/*
+ * Applicable to both EPR AVS APDO source and sink cap as per
+ * Table 6.15 EPR Adjustable Voltage Supply APDO – Source
+ * Table 6.22 EPR Adjustable Voltage Supply APDO – Sink
+ */
+#define PDO_EPR_AVS_APDO_MAX_VOLT	GENMASK(25, 17)	/* 100mV unit */
+#define PDO_EPR_AVS_APDO_MIN_VOLT	GENMASK(15, 8)	/* 100mV unit */
+#define PDO_EPR_AVS_APDO_PDP	GENMASK(7, 0)	/* 1W unit */
+
+/*
+ * Applicable only to SPR AVS APDO source cap as per
+ * Table 6.14 SPR Adjustable Voltage Supply APDO – Source
+ */
+#define PDO_SPR_AVS_APDO_PEAK_CURRENT	GENMASK(27, 26)
+
+/*
+ * Applicable to both SPR AVS APDO source and sink cap as per
+ * Table 6.14 SPR Adjustable Voltage Supply APDO – Source
+ * Table 6.21 SPR Adjustable Voltage Supply APDO – Sink
+ */
+#define PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR	GENMASK(19, 10)	/* 10mA unit */
+#define PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR	GENMASK(9, 0)	/* 10mA unit */
+
 static inline enum pd_pdo_type pdo_type(u32 pdo)
 {
	return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK;
@@ -350,6 +382,41 @@ static inline unsigned int pdo_pps_apdo_max_current(u32 pdo)
		PDO_PPS_APDO_CURR_MASK) * 50;
 }
+static inline unsigned int pdo_epr_avs_apdo_src_peak_current(u32 pdo)
+{
+	return FIELD_GET(PDO_EPR_AVS_APDO_PEAK_CURRENT, pdo);
+}
+
+static inline unsigned int pdo_epr_avs_apdo_min_voltage_mv(u32 pdo)
+{
+	return FIELD_GET(PDO_EPR_AVS_APDO_MIN_VOLT, pdo) * 100;
+}
+
+static inline unsigned int pdo_epr_avs_apdo_max_voltage_mv(u32 pdo)
+{
+	return FIELD_GET(PDO_EPR_AVS_APDO_MAX_VOLT, pdo) * 100;
+}
+
+static inline unsigned int pdo_epr_avs_apdo_pdp_w(u32 pdo)
+{
+	return FIELD_GET(PDO_EPR_AVS_APDO_PDP, pdo);
+}
+
+static inline unsigned int pdo_spr_avs_apdo_src_peak_current(u32 pdo)
+{
+	return FIELD_GET(PDO_SPR_AVS_APDO_PEAK_CURRENT, pdo);
+}
+
+static inline unsigned int pdo_spr_avs_apdo_9v_to_15v_max_current_ma(u32 pdo)
+{
+	return FIELD_GET(PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR, pdo) * 10;
+}
+
+static inline unsigned int pdo_spr_avs_apdo_15v_to_20v_max_current_ma(u32 pdo)
+{
+	return FIELD_GET(PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR, pdo) * 10;
+}
+
 /* RDO: Request Data Object */
 #define RDO_OBJ_POS_SHIFT	28
 #define RDO_OBJ_POS_MASK	0x7
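A small sketch of how the new AVS helpers above might be used to report an EPR AVS source capability; selecting the PDO and verifying its APDO type bits is assumed to happen in the caller, and the printing context is illustrative:

#include <linux/device.h>
#include <linux/usb/pd.h>

/* Illustrative decode of an EPR AVS source-capability APDO. */
static void my_dump_epr_avs_src_cap(struct device *dev, u32 pdo)
{
	dev_info(dev, "EPR AVS source: %u..%u mV, PDP %u W, peak current code %u\n",
		 pdo_epr_avs_apdo_min_voltage_mv(pdo),
		 pdo_epr_avs_apdo_max_voltage_mv(pdo),
		 pdo_epr_avs_apdo_pdp_w(pdo),
		 pdo_epr_avs_apdo_src_peak_current(pdo));
}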
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 252af3f77039..309251572e2e 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -337,6 +337,7 @@ struct typec_plug *typec_register_plug(struct typec_cable *cable,
 void typec_unregister_plug(struct typec_plug *plug);
 void typec_set_data_role(struct typec_port *port, enum typec_data_role role);
+enum typec_data_role typec_get_data_role(struct typec_port *port);
 void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
 void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
 void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index b3c0866ea70f..f7db3bd4c90e 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -173,6 +173,19 @@ typec_altmode_get_svdm_version(struct typec_altmode *altmode)
 }
 /**
+ * typec_altmode_get_data_role - Get port data role
+ * @altmode: Handle to the alternate mode
+ *
+ * Alt Mode drivers should only issue Enter Mode through the port if they are
+ * the DFP.
+ */
+static inline enum typec_data_role
+typec_altmode_get_data_role(struct typec_altmode *altmode)
+{
+	return typec_get_data_role(typec_altmode2port(altmode));
+}
+
+/**
  * struct typec_altmode_driver - USB Type-C alternate mode device driver
  * @id_table: Null terminated array of SVIDs
  * @probe: Callback for device binding
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 55dcea12082c..0b570f1b8bc8 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -55,6 +55,7 @@ struct typec_thunderbolt_data {
 /* TBT3 Device Enter Mode VDO bits */
 #define TBT_ENTER_MODE_CABLE_SPEED(s)	TBT_SET_CABLE_SPEED(s)
+#define TBT_ENTER_MODE_UNI_DIR_LSRX	BIT(23)
 #define TBT_ENTER_MODE_ACTIVE_CABLE	BIT(24)
 #endif /* __USB_TYPEC_TBT_H */
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 9373962aade9..2eb528058d0d 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -136,10 +136,10 @@
 #define PTR_IF(cond, ptr)	((cond) ? (ptr) : NULL)
 /**
- * to_user_ptr - cast a pointer passed as u64 from user space to void __user *
+ * u64_to_user_ptr - cast a pointer passed as u64 from user space to void __user *
 * @x: The u64 value from user space, usually via IOCTL
 *
- * to_user_ptr() simply casts a pointer passed as u64 from user space to void
+ * u64_to_user_ptr() simply casts a pointer passed as u64 from user space to void
 * __user * correctly. Using this lets us get rid of all the tiresome casts.
 */
 #define u64_to_user_ptr(x) \
diff --git a/include/linux/vmcore_info.h b/include/linux/vmcore_info.h
index 37e003ae5262..e71518caacdf 100644
--- a/include/linux/vmcore_info.h
+++ b/include/linux/vmcore_info.h
@@ -5,6 +5,7 @@
 #include <linux/linkage.h>
 #include <linux/elfcore.h>
 #include <linux/elf.h>
+#include <uapi/linux/vmcore.h>
 #define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
 #define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(NN_PRSTATUS), 4)
@@ -77,4 +78,11 @@ extern u32 *vmcoreinfo_note;
 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len);
 void final_note(Elf_Word *buf);
+
+#ifdef CONFIG_VMCORE_INFO
+void hwerr_log_error_type(enum hwerr_error_type src);
+#else
+static inline void hwerr_log_error_type(enum hwerr_error_type src) {};
+#endif
+
 #endif /* LINUX_VMCORE_INFO_H */
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
index 27f57eca8cb1..587122e2c29c 100644
--- a/include/linux/xxhash.h
+++ b/include/linux/xxhash.h
@@ -141,21 +141,7 @@ static inline unsigned long xxhash(const void *input, size_t length,
 */
 /**
- * struct xxh32_state - private xxh32 state, do not use members directly
- */
-struct xxh32_state {
-	uint32_t total_len_32;
-	uint32_t large_len;
-	uint32_t v1;
-	uint32_t v2;
-	uint32_t v3;
-	uint32_t v4;
-	uint32_t mem32[4];
-	uint32_t memsize;
-};
-
-/**
- * struct xxh32_state - private xxh64 state, do not use members directly
+ * struct xxh64_state - private xxh64 state, do not use members directly
 */
 struct xxh64_state {
	uint64_t total_len;
@@ -168,16 +154,6 @@ struct xxh64_state {
 };
 /**
- * xxh32_reset() - reset the xxh32 state to start a new hashing operation
- *
- * @state: The xxh32 state to reset.
- * @seed: Initialize the hash state with this seed.
- *
- * Call this function on any xxh32_state to prepare for a new hashing operation.
- */
-void xxh32_reset(struct xxh32_state *state, uint32_t seed);
-
-/**
 * xxh64_reset() - reset the xxh64 state to start a new hashing operation
 *
 * @state: The xxh64 state to reset.
@@ -210,24 +186,4 @@ int xxh64_update(struct xxh64_state *state, const void *input, size_t length);
 */
 uint64_t xxh64_digest(const struct xxh64_state *state);
-/*-**************************
- * Utils
- ***************************/
-
-/**
- * xxh32_copy_state() - copy the source state into the destination state
- *
- * @src: The source xxh32 state.
- * @dst: The destination xxh32 state.
- */
-void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src);
-
-/**
- * xxh64_copy_state() - copy the source state into the destination state
- *
- * @src: The source xxh64 state.
- * @dst: The destination xxh64 state.
- */
-void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src);
-
 #endif /* XXHASH_H */
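With the xxh32 streaming interface and the copy helpers removed above, the remaining streaming API is xxh64_reset()/xxh64_update()/xxh64_digest(). A short usage sketch; the function name, seed and buffers are arbitrary:

#include <linux/xxhash.h>

/* Hash two buffers with the streaming xxh64 interface. */
static u64 my_xxh64_two_buffers(const void *a, size_t a_len,
				const void *b, size_t b_len)
{
	struct xxh64_state state;

	xxh64_reset(&state, 0);
	xxh64_update(&state, a, a_len);	/* returns -EINVAL only for NULL input */
	xxh64_update(&state, b, b_len);
	return xxh64_digest(&state);
}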
