Diffstat (limited to 'include')
-rw-r--r--   include/asm-generic/atomic.h               |  4
-rw-r--r--   include/asm-generic/cmpxchg-local.h        | 12
-rw-r--r--   include/asm-generic/cmpxchg.h              |  6
-rw-r--r--   include/asm-generic/io.h                   | 16
-rw-r--r--   include/drm/gpu_scheduler.h                |  7
-rw-r--r--   include/linux/context_tracking.h           |  1
-rw-r--r--   include/linux/context_tracking_state.h     |  2
-rw-r--r--   include/linux/cpuhotplug.h                 |  1
-rw-r--r--   include/linux/cpumask.h                    | 17
-rw-r--r--   include/linux/find.h                       | 37
-rw-r--r--   include/linux/ftrace.h                     |  2
-rw-r--r--   include/linux/kvm_host.h                   | 11
-rw-r--r--   include/linux/kvm_irqfd.h                  |  2
-rw-r--r--   include/linux/percpu_counter.h             |  6
-rw-r--r--   include/linux/phy.h                        |  2
-rw-r--r--   include/linux/phylink.h                    |  1
-rw-r--r--   include/linux/sfp.h                        |  5
-rw-r--r--   include/net/raw.h                          |  4
-rw-r--r--   include/trace/events/f2fs.h                |  2
-rw-r--r--   include/trace/events/mmap.h                |  4
-rw-r--r--   include/trace/events/rcu.h                 |  2
-rw-r--r--   include/trace/stages/stage5_get_offsets.h  | 21
22 files changed, 117 insertions, 48 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 04b8be9f1a77..e271d6708c87 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -130,7 +130,7 @@ ATOMIC_OP(xor, ^)
 #define arch_atomic_read(v)			READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))
 
-#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
-#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (u32)(v)))
+#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
 
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index c3e7315b7c1d..3df9f59a544e 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -26,16 +26,16 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
 	raw_local_irq_save(flags);
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
-		if (prev == (u8)old)
-			*(u8 *)ptr = (u8)new;
+		if (prev == (old & 0xffu))
+			*(u8 *)ptr = (new & 0xffu);
 		break;
 	case 2: prev = *(u16 *)ptr;
-		if (prev == (u16)old)
-			*(u16 *)ptr = (u16)new;
+		if (prev == (old & 0xffffu))
+			*(u16 *)ptr = (new & 0xffffu);
 		break;
 	case 4: prev = *(u32 *)ptr;
-		if (prev == (u32)old)
-			*(u32 *)ptr = (u32)new;
+		if (prev == (old & 0xffffffffu))
+			*(u32 *)ptr = (new & 0xffffffffu);
 		break;
 	case 8: prev = *(u64 *)ptr;
 		if (prev == old)
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index dca4419922a9..848de25fc4bf 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -32,7 +32,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u8 *)ptr;
-		*(volatile u8 *)ptr = x;
+		*(volatile u8 *)ptr = (x & 0xffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u8 */
@@ -43,7 +43,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u16 *)ptr;
-		*(volatile u16 *)ptr = x;
+		*(volatile u16 *)ptr = (x & 0xffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u16 */
@@ -54,7 +54,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u32 *)ptr;
-		*(volatile u32 *)ptr = x;
+		*(volatile u32 *)ptr = (x & 0xffffffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u32 */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 4c44a29b5e8e..587e7e9b9a37 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)
 
 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
 	__io_br();
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	__io_ar(val);
 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
 	return val;
@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 	__io_bw();
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	__io_aw();
 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
@@ -319,7 +319,7 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
 	u16 val;
 
 	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
-	val = __le16_to_cpu(__raw_readw(addr));
+	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
 	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -332,7 +332,7 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
 	u32 val;
 
 	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
-	val = __le32_to_cpu(__raw_readl(addr));
+	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
 	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -345,7 +345,7 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
 	u64 val;
 
 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -366,7 +366,7 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
-	__raw_writew(cpu_to_le16(value), addr);
+	__raw_writew((u16 __force)cpu_to_le16(value), addr);
 	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -376,7 +376,7 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
-	__raw_writel(__cpu_to_le32(value), addr);
+	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
 	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -386,7 +386,7 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 9db9e5e504ee..9935d1e2ff69 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -228,13 +228,6 @@ struct drm_sched_entity {
 	 */
 	struct rb_node			rb_tree_node;
 
-	/**
-	 * @elapsed_ns:
-	 *
-	 * Records the amount of time where jobs from this entity were active
-	 * on the GPU.
-	 */
-	uint64_t			elapsed_ns;
 };
 
 /**
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d4afa8508a80..3a7909ed5498 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -96,6 +96,7 @@ static inline void user_exit_irqoff(void) { }
 static inline int exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline int ct_state(void) { return -1; }
+static inline int __ct_state(void) { return -1; }
 static __always_inline bool context_tracking_guest_enter(void) { return false; }
 static inline void context_tracking_guest_exit(void) { }
 #define CT_WARN_ON(cond) do { } while (0)
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 4a4d56f77180..fdd537ea513f 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -46,7 +46,9 @@ struct context_tracking {
 
 #ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif
 
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 static __always_inline int __ct_state(void)
 {
 	return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index c6fab004104a..5b2f8147d1ae 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -218,7 +218,6 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_X86_CQM_ONLINE,
 	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
 	CPUHP_AP_PERF_X86_IDXD_ONLINE,
-	CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE,
 	CPUHP_AP_PERF_S390_CF_ONLINE,
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d4901ca8883c..ca736b05ec7b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -351,6 +351,23 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
 	for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
 
 /**
+ * for_each_cpu_or - iterate over every cpu present in either mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places.  It is equivalent to:
+ *	struct cpumask tmp;
+ *	cpumask_or(&tmp, &mask1, &mask2);
+ *	for_each_cpu(cpu, &tmp)
+ *	...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_or(cpu, mask1, mask2)				\
+	for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
  * cpumask_any_but - return a "random" in a cpumask, but not this one.
  * @mask: the cpumask to search
  * @cpu: the cpu to ignore.
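The for_each_cpu_or() kernel-doc above already states the equivalence with a temporary mask; as a concrete illustration, here is a minimal sketch of a caller (the function and mask names are hypothetical and not part of this series):

/* Count the CPUs present in at least one of two masks, without the
 * cpumask_or() + temporary-mask dance; needs <linux/cpumask.h>. */
static unsigned int count_cpus_in_either(const struct cpumask *a,
					 const struct cpumask *b)
{
	unsigned int cpu, n = 0;

	for_each_cpu_or(cpu, a, b)
		n++;

	return n;
}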
diff --git a/include/linux/find.h b/include/linux/find.h
index 4647864a5ffd..5e4f39ef2e72 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -14,6 +14,8 @@ unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long
 				unsigned long nbits, unsigned long start);
 unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
 					unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+					unsigned long nbits, unsigned long start);
 unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
 					unsigned long start);
 extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
@@ -127,6 +129,36 @@ unsigned long find_next_andnot_bit(const unsigned long *addr1,
 }
 #endif
 
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+		const unsigned long *addr2, unsigned long size,
+		unsigned long offset)
+{
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
+	return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
 #ifndef find_next_zero_bit
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
@@ -536,6 +568,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
 	     (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
 	     (bit)++)
 
+#define for_each_or_bit(bit, addr1, addr2, size) \
+	for ((bit) = 0;								\
+	     (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+	     (bit)++)
+
 /* same as for_each_set_bit() but use bit as value to start with */
 #define for_each_set_bit_from(bit, addr, size) \
 	for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 366c730beaa3..402fc061de75 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -980,7 +980,7 @@ static inline void __ftrace_enabled_restore(int enabled)
 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
 
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
 {
 	unsigned long addr = CALLER_ADDR0;
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8ada23756b0e..a9adf75344be 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -755,6 +755,7 @@ struct kvm {
 	struct {
 		spinlock_t        lock;
 		struct list_head  items;
+		/* resampler_list update side is protected by resampler_lock. */
 		struct list_head  resampler_list;
 		struct mutex      resampler_lock;
 	} irqfds;
@@ -1986,6 +1987,9 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 #ifdef CONFIG_HAVE_KVM_IRQFD
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+				unsigned int irqchip,
+				unsigned int pin);
 void kvm_irq_routing_update(struct kvm *);
 #else
 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
@@ -1994,6 +1998,13 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 }
 
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+					      unsigned int irqchip,
+					      unsigned int pin)
+{
+	return false;
+}
 #endif
 
 #else
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dac047abdba7..8ad43692e3bb 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -31,7 +31,7 @@ struct kvm_kernel_irqfd_resampler {
 	/*
 	 * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
	 * resamplers among irqfds on the same gsi.
-	 * Accessed and modified under kvm->irqfds.resampler_lock
+	 * RCU list modified under kvm->irqfds.resampler_lock
 	 */
 	struct list_head link;
 };
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 521a733e21a9..75b73c83bc9d 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -45,7 +45,6 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
 			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-s64 percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
 
@@ -196,11 +195,6 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 	return percpu_counter_read(fbc);
 }
 
-static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-	return percpu_counter_read(fbc);
-}
-
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
 	return true;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 36bf0bbc8efa..db7c0bd67559 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1547,7 +1547,7 @@ int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
 struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
 struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
 struct phy_device *device_phy_find_device(struct device *dev);
-struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
 int phy_device_register(struct phy_device *phy);
 void phy_device_free(struct phy_device *phydev);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index c492c26202b5..637698ed5cb6 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -574,6 +574,7 @@ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
 			       phy_interface_t iface,
 			       const struct phylink_mac_ops *mac_ops);
 void phylink_destroy(struct phylink *);
+bool phylink_expects_phy(struct phylink *pl);
 
 int phylink_connect_phy(struct phylink *, struct phy_device *);
 int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 52b98f9666a2..ef06a195b3c2 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -557,7 +557,7 @@ int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
 void sfp_upstream_start(struct sfp_bus *bus);
 void sfp_upstream_stop(struct sfp_bus *bus);
 void sfp_bus_put(struct sfp_bus *bus);
-struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
 int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
 			 const struct sfp_upstream_ops *ops);
 void sfp_bus_del_upstream(struct sfp_bus *bus);
@@ -619,7 +619,8 @@ static inline void sfp_bus_put(struct sfp_bus *bus)
 {
 }
 
-static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+static inline struct sfp_bus *
+sfp_bus_find_fwnode(const struct fwnode_handle *fwnode)
 {
 	return NULL;
 }
diff --git a/include/net/raw.h b/include/net/raw.h
index 2c004c20ed99..3af5289fdead 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -37,7 +37,7 @@ int raw_rcv(struct sock *, struct sk_buff *);
 
 struct raw_hashinfo {
 	spinlock_t lock;
-	struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
+	struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
 };
 
 static inline u32 raw_hashfunc(const struct net *net, u32 proto)
@@ -51,7 +51,7 @@ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
 
 	spin_lock_init(&hashinfo->lock);
 	for (i = 0; i < RAW_HTABLE_SIZE; i++)
-		INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
+		INIT_HLIST_HEAD(&hashinfo->ht[i]);
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 1322d34a5dfc..99cbc5949e3c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -512,7 +512,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
 		__field(ino_t,	ino)
-		__field(nid_t,	nid[3])
+		__array(nid_t,	nid, 3)
 		__field(int,	depth)
 		__field(int,	err)
 	),
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index 216de5f03621..f8d61485de16 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -35,7 +35,7 @@ TRACE_EVENT(vm_unmapped_area,
 		__entry->align_offset = info->align_offset;
 	),
 
-	TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+	TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
 		IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
 		IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
 		__entry->total_vm, __entry->flags, __entry->length,
@@ -110,7 +110,7 @@ TRACE_EVENT(exit_mmap,
 		__entry->mt = &mm->mm_mt;
 	),
 
-	TP_printk("mt_mod %p, DESTROY\n",
+	TP_printk("mt_mod %p, DESTROY",
 		__entry->mt
 	)
 );
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 90b2fb0292cb..012fa0d171b2 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -768,7 +768,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
 	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
 
 	TP_STRUCT__entry(
-		__field(char, rcutorturename[RCUTORTURENAME_LEN])
+		__array(char, rcutorturename, RCUTORTURENAME_LEN)
 		__field(struct rcu_head *, rhp)
 		__field(unsigned long, secs)
 		__field(unsigned long, c_old)
diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
index ac5c24d3beeb..e30a13be46ba 100644
--- a/include/trace/stages/stage5_get_offsets.h
+++ b/include/trace/stages/stage5_get_offsets.h
@@ -9,17 +9,30 @@
 #undef __entry
 #define __entry entry
 
+/*
+ * Fields should never declare an array: i.e. __field(int, arr[5])
+ * If they do, it will cause issues in parsing and possibly corrupt the
+ * events. To prevent that from happening, test the sizeof() a fictitious
+ * type called "struct _test_no_array_##item" which will fail if "item"
+ * contains array elements (like "arr[5]").
+ *
+ * If you hit this, use __array(int, arr, 5) instead.
+ */
 #undef __field
-#define __field(type, item)
+#define __field(type, item)					\
+	{ (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __field_ext
-#define __field_ext(type, item, filter_type)
+#define __field_ext(type, item, filter_type)			\
+	{ (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __field_struct
-#define __field_struct(type, item)
+#define __field_struct(type, item)				\
+	{ (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
+#define __field_struct_ext(type, item, filter_type)		\
+	{ (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __array
 #define __array(type, item, len)
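The __field()-to-__array() conversions in the f2fs and rcu events above are exactly the mistake this new stage5 check traps: pasting an item such as nid[3] into struct _test_no_array_##item yields the ill-formed type struct _test_no_array_nid[3], so the sizeof() no longer compiles. A minimal sketch of the two forms, with illustrative field names only:

	/* Now a build error: the array slips through __field(), and the
	 * guard expands to sizeof(struct _test_no_array_nid[3] *). */
	__field(nid_t, nid[3])

	/* Correct form, as applied in the f2fs and rcu patches above. */
	__array(nid_t, nid, 3)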