Diffstat (limited to 'include')
 include/asm-generic/vmlinux.lds.h   |   2
 include/drm/drm_file.h              |   3
 include/linux/can/can-ml.h          |  12
 include/linux/dma-fence.h           |   3
 include/linux/dma-heap.h            |  12
 include/linux/icmpv6.h              |  26
 include/linux/ipv6.h                |   1
 include/linux/netdevice.h           |  34
 include/linux/nfs_fs.h              |   3
 include/linux/nfs_fs_sb.h           |   4
 include/linux/platform_profile.h    |   6
 include/linux/vdpa.h                |  44
 include/linux/virtio_pci_modern.h   | 111
 include/net/icmp.h                  |   6
 include/trace/events/rpcrdma.h      |  50
 include/uapi/linux/vdpa.h           |  40
 16 files changed, 329 insertions(+), 28 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fbc0ca8d903b..6786f8c0182f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -98,7 +98,7 @@
  */
 #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$Lubsan_*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 716990bace10..b81b3bfb08c8 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -399,6 +399,9 @@ void drm_event_cancel_free(struct drm_device *dev,
 			   struct drm_pending_event *p);
 void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+				     struct drm_pending_event *e,
+				     ktime_t timestamp);
 
 struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
 
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 2f5d731ae251..8afa92d15a66 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -44,6 +44,7 @@
 
 #include <linux/can.h>
 #include <linux/list.h>
+#include <linux/netdevice.h>
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
 #define CAN_EFF_RCV_HASH_BITS 10
@@ -65,4 +66,15 @@ struct can_ml_priv {
 #endif
 };
 
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+	return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+				   struct can_ml_priv *ml_priv)
+{
+	netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
 #endif /* CAN_ML_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 09e23adb351d..9f12efaaa93a 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {}
 
 int dma_fence_signal(struct dma_fence *fence);
 int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+				      ktime_t timestamp);
 signed long dma_fence_default_wait(struct dma_fence *fence,
 				   bool intr, signed long timeout);
 int dma_fence_add_callback(struct dma_fence *fence,
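The dma_fence_signal_timestamp()/dma_fence_signal_timestamp_locked() exports above pair naturally with the new drm_send_event_timestamp_locked() declaration: both let a driver report the time the hardware actually completed work rather than the time the CPU noticed it. A minimal sketch of a completion handler using the unlocked variant (the handler, fence and timestamp source are hypothetical, not part of this diff):

#include <linux/dma-fence.h>
#include <linux/ktime.h>
#include <linux/printk.h>

/* Hypothetical completion path: signal with the hardware-reported
 * completion time instead of letting dma_fence_signal() stamp "now". */
static void example_complete_job(struct dma_fence *fence, ktime_t hw_timestamp)
{
	/* Takes fence->lock internally; returns -EINVAL if already signalled. */
	if (dma_fence_signal_timestamp(fence, hw_timestamp))
		pr_debug("fence already signalled\n");
}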
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 454e354d1ffb..5bc5c946af58 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -16,15 +16,15 @@ struct dma_heap;
 
 /**
  * struct dma_heap_ops - ops to operate on a given heap
- * @allocate:		allocate dmabuf and return fd
+ * @allocate:		allocate dmabuf and return struct dma_buf ptr
  *
- * allocate returns dmabuf fd on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
  */
 struct dma_heap_ops {
-	int (*allocate)(struct dma_heap *heap,
-			unsigned long len,
-			unsigned long fd_flags,
-			unsigned long heap_flags);
+	struct dma_buf *(*allocate)(struct dma_heap *heap,
+				    unsigned long len,
+				    unsigned long fd_flags,
+				    unsigned long heap_flags);
 };
 
 /**
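With the dma-heap change above, a heap's allocate op now hands the struct dma_buf itself back to the core (returning ERR_PTR(-errno) on failure) instead of creating and returning an fd, which suggests fd installation moves into the dma-heap core. A rough sketch of what an exporter callback looks like under the new signature; the heap name is made up and the actual buffer setup/dma_buf_export() work is elided:

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>

static struct dma_buf *example_heap_allocate(struct dma_heap *heap,
					     unsigned long len,
					     unsigned long fd_flags,
					     unsigned long heap_flags)
{
	/* A real heap would allocate backing pages and call dma_buf_export();
	 * on failure it now returns ERR_PTR(-errno) rather than a negative fd. */
	return ERR_PTR(-ENOMEM);
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};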
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 452d8978ffc7..9055cb380ee2 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -3,6 +3,7 @@
 #define _LINUX_ICMPV6_H
 
 #include <linux/skbuff.h>
+#include <linux/ipv6.h>
 #include <uapi/linux/icmpv6.h>
 
 static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
 #if IS_ENABLED(CONFIG_IPV6)
 
 typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
-			     const struct in6_addr *force_saddr);
+			     const struct in6_addr *force_saddr,
+			     const struct inet6_skb_parm *parm);
 void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
-		const struct in6_addr *force_saddr);
+		const struct in6_addr *force_saddr,
+		const struct inet6_skb_parm *parm);
 #if IS_BUILTIN(CONFIG_IPV6)
-static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+				 const struct inet6_skb_parm *parm)
 {
-	icmp6_send(skb, type, code, info, NULL);
+	icmp6_send(skb, type, code, info, NULL, parm);
 }
 static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
 {
@@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
 	return 0;
 }
 #else
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+			  const struct inet6_skb_parm *parm);
 extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
 extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
 #endif
 
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+	__icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
 int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
 			       unsigned int data_len);
 
 #if IS_ENABLED(CONFIG_NF_NAT)
 void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
 #else
-#define icmpv6_ndo_send icmpv6_send
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+	struct inet6_skb_parm parm = { 0 };
+	__icmpv6_send(skb_in, type, code, info, &parm);
+}
 #endif
 
 #else
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 9d1f29f0c512..70b2ad3b9884 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -85,7 +85,6 @@ struct ipv6_params {
 	__s32 autoconf;
 };
 extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ddf4cfc12615..f06fbee8638e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1584,6 +1584,12 @@ enum netdev_priv_flags {
 #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
 #define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
 
+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+	ML_PRIV_NONE,
+	ML_PRIV_CAN,
+};
+
 /**
  *	struct net_device - The DEVICE structure.
  *
@@ -1779,6 +1785,7 @@ enum netdev_priv_flags {
  *	@nd_net:		Network namespace this network device is inside
  *
  *	@ml_priv:	Mid-layer private
+ *	@ml_priv_type:  Mid-layer private type
  *	@lstats:	Loopback statistics
  *	@tstats:	Tunnel statistics
  *	@dstats:	Dummy statistics
@@ -2094,8 +2101,10 @@ struct net_device {
 	possible_net_t			nd_net;
 
 	/* mid-layer private */
+	void				*ml_priv;
+	enum netdev_ml_priv_type	ml_priv_type;
+
 	union {
-		void					*ml_priv;
 		struct pcpu_lstats __percpu		*lstats;
 		struct pcpu_sw_netstats __percpu	*tstats;
 		struct pcpu_dstats __percpu		*dstats;
@@ -2286,6 +2295,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
 	netdev_set_rx_headroom(dev, -1);
 }
 
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+					enum netdev_ml_priv_type type)
+{
+	if (dev->ml_priv_type != type)
+		return NULL;
+
+	return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+				       void *ml_priv,
+				       enum netdev_ml_priv_type type)
+{
+	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+	     dev->ml_priv_type, type);
+	WARN(!dev->ml_priv_type && dev->ml_priv,
+	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+	dev->ml_priv = ml_priv;
+	dev->ml_priv_type = type;
+}
+
 /*
  * Net namespace inlines
  */
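The netdevice.h and can-ml.h hunks in this series replace direct writes to dev->ml_priv with typed accessors: the pointer now carries an enum netdev_ml_priv_type tag, and a reader asking for the wrong type gets NULL instead of another layer's private data. A short sketch of CAN-side usage built on the helpers added above (the attach/lookup functions are illustrative only):

#include <linux/can/can-ml.h>
#include <linux/netdevice.h>

static void example_can_attach(struct net_device *dev,
			       struct can_ml_priv *ml_priv)
{
	/* Stores ml_priv tagged as ML_PRIV_CAN; netdev_set_ml_priv()
	 * warns if some other type already claimed the pointer. */
	can_set_ml_priv(dev, ml_priv);
}

static struct can_ml_priv *example_can_lookup(struct net_device *dev)
{
	/* NULL unless the device's ml_priv was registered as ML_PRIV_CAN. */
	return can_get_ml_priv(dev);
}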
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 8c6c4e32fc2f..eadaabd18dc7 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -388,6 +388,7 @@ extern int nfs_open(struct inode *, struct file *);
 extern int nfs_attribute_cache_expired(struct inode *inode);
 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern int nfs_clear_invalid_mapping(struct address_space *mapping);
 extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
 extern int nfs_revalidate_mapping_rcu(struct inode *inode);
@@ -571,8 +572,6 @@ nfs_have_writebacks(struct inode *inode)
 extern int nfs_readpage(struct file *, struct page *);
 extern int nfs_readpages(struct file *, struct address_space *,
 			struct list_head *, unsigned);
-extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
-			       struct page *);
 
 /*
  * inline functions
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 38e60ec742df..6f76b32a0238 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -142,7 +142,7 @@ struct nfs_server {
 	struct nlm_host		*nlm_host;	/* NLM client handle */
 	struct nfs_iostats __percpu *io_stats;	/* I/O statistics */
 	atomic_long_t		writeback;	/* number of writeback pages */
-	int			flags;		/* various flags */
+	unsigned int		flags;		/* various flags */
 
 /* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
 #define NFS_MOUNT_LOOKUP_CACHE_NONEG	0x10000
@@ -153,6 +153,8 @@ struct nfs_server {
 #define NFS_MOUNT_LOCAL_FCNTL		0x200000
 #define NFS_MOUNT_SOFTERR		0x400000
 #define NFS_MOUNT_SOFTREVAL		0x800000
+#define NFS_MOUNT_WRITE_EAGER		0x01000000
+#define NFS_MOUNT_WRITE_WAIT		0x02000000
 
 	unsigned int		caps;		/* server capabilities */
 	unsigned int		rsize;		/* read size */
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index a26542d53058..a6329003aee7 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -12,9 +12,8 @@
 #include <linux/bitops.h>
 
 /*
- * If more options are added please update profile_names
- * array in platform-profile.c and sysfs-platform-profile.rst
- * documentation.
+ * If more options are added please update profile_names array in
+ * platform_profile.c and sysfs-platform_profile documentation.
  */
 
 enum platform_profile_option {
@@ -22,6 +21,7 @@
 	PLATFORM_PROFILE_COOL,
 	PLATFORM_PROFILE_QUIET,
 	PLATFORM_PROFILE_BALANCED,
+	PLATFORM_PROFILE_BALANCED_PERFORMANCE,
 	PLATFORM_PROFILE_PERFORMANCE,
 	PLATFORM_PROFILE_LAST, /*must always be last */
 };
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 0fefeb976877..4ab5494503a8 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -35,6 +35,8 @@ struct vdpa_vq_state {
 	u16	avail_index;
 };
 
+struct vdpa_mgmt_dev;
+
 /**
  * vDPA device - representation of a vDPA device
  * @dev: underlying device
@@ -43,6 +45,8 @@ struct vdpa_vq_state {
  * @index: device index
  * @features_valid: were features initialized? for legacy guests
  * @nvqs: maximum number of supported virtqueues
+ * @mdev: management device pointer; caller must setup when registering device as part
+ *	  of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
  */
 struct vdpa_device {
 	struct device dev;
@@ -51,6 +55,7 @@ struct vdpa_device {
 	unsigned int index;
 	bool features_valid;
 	int nvqs;
+	struct vdpa_mgmt_dev *mdev;
 };
 
 /**
@@ -245,20 +250,22 @@ struct vdpa_config_ops {
 
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
 					const struct vdpa_config_ops *config,
-					int nvqs,
-					size_t size);
+					int nvqs, size_t size, const char *name);
 
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs)   \
+#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name)   \
 			  container_of(__vdpa_alloc_device( \
 				       parent, config, nvqs, \
 				       sizeof(dev_struct) + \
 				       BUILD_BUG_ON_ZERO(offsetof( \
-				       dev_struct, member))), \
+				       dev_struct, member)), name), \
 				       dev_struct, member)
 
 int vdpa_register_device(struct vdpa_device *vdev);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
+int _vdpa_register_device(struct vdpa_device *vdev);
+void _vdpa_unregister_device(struct vdpa_device *vdev);
+
 /**
  * vdpa_driver - operations for a vDPA driver
  * @driver: underlying device driver
@@ -336,4 +343,33 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
 	ops->get_config(vdev, offset, buf, len);
 }
 
+/**
+ * vdpa_mgmtdev_ops - vdpa device ops
+ * @dev_add: Add a vdpa device using alloc and register
+ *	     @mdev: parent device to use for device addition
+ *	     @name: name of the new vdpa device
+ *	     Driver need to add a new device using _vdpa_register_device()
+ *	     after fully initializing the vdpa device. Driver must return 0
+ *	     on success or appropriate error code.
+ * @dev_del: Remove a vdpa device using unregister
+ *	     @mdev: parent device to use for device removal
+ *	     @dev: vdpa device to remove
+ *	     Driver need to remove the specified device by calling
+ *	     _vdpa_unregister_device().
+ */
+struct vdpa_mgmtdev_ops {
+	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
+	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+};
+
+struct vdpa_mgmt_dev {
+	struct device *device;
+	const struct vdpa_mgmtdev_ops *ops;
+	const struct virtio_device_id *id_table; /* supported ids */
+	struct list_head list;
+};
+
+int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
+void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
+
 #endif /* _LINUX_VDPA_H */
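The vdpa.h additions above, together with the new uapi/linux/vdpa.h at the end of this diff, form a management-device model: a parent driver registers a struct vdpa_mgmt_dev whose dev_add()/dev_del() callbacks create and destroy vdpa instances on request (via the netlink commands defined in the uapi header), using the underscore-prefixed _vdpa_register_device()/_vdpa_unregister_device() variants. A rough outline of the registration side, with illustrative names and the dev_add() body elided:

#include <linux/mod_devicetable.h>
#include <linux/vdpa.h>
#include <linux/virtio_ids.h>

static int example_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
	/* Allocate with vdpa_alloc_device(..., name), initialise the device,
	 * set vdev->mdev = mdev, then call _vdpa_register_device(vdev). */
	return -EOPNOTSUPP;	/* body elided in this sketch */
}

static void example_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
				 struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops example_mgmtdev_ops = {
	.dev_add = example_vdpa_dev_add,
	.dev_del = example_vdpa_dev_del,
};

static struct virtio_device_id example_id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

/* Registered from the parent driver's probe with
 * vdpa_mgmtdev_register(&example_mgmt_dev) and torn down with
 * vdpa_mgmtdev_unregister(); .device would point at the parent device. */
static struct vdpa_mgmt_dev example_mgmt_dev = {
	.ops = &example_mgmtdev_ops,
	.id_table = example_id_table,
};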
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
new file mode 100644
index 000000000000..f26acbeec965
--- /dev/null
+++ b/include/linux/virtio_pci_modern.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_MODERN_H
+#define _LINUX_VIRTIO_PCI_MODERN_H
+
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_modern_device {
+	struct pci_dev *pci_dev;
+
+	struct virtio_pci_common_cfg __iomem *common;
+	/* Device-specific data (non-legacy mode) */
+	void __iomem *device;
+	/* Base of vq notifications (non-legacy mode). */
+	void __iomem *notify_base;
+	/* Where to read and clear interrupt */
+	u8 __iomem *isr;
+
+	/* So we can sanity-check accesses. */
+	size_t notify_len;
+	size_t device_len;
+
+	/* Capability for when we need to map notifications per-vq. */
+	int notify_map_cap;
+
+	/* Multiply queue_notify_off by this value. (non-legacy mode). */
+	u32 notify_offset_multiplier;
+
+	int modern_bars;
+
+	struct virtio_device_id id;
+};
+
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(const u8 __iomem *addr)
+{
+	return ioread8(addr);
+}
+static inline u16 vp_ioread16 (const __le16 __iomem *addr)
+{
+	return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+	iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
+{
+	iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
+{
+	iowrite32(value, addr);
+}
+
+static inline void vp_iowrite64_twopart(u64 val,
+					__le32 __iomem *lo,
+					__le32 __iomem *hi)
+{
+	vp_iowrite32((u32)val, lo);
+	vp_iowrite32(val >> 32, hi);
+}
+
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+		     u64 features);
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+		   u8 status);
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+			   u16 idx, u16 vector);
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+		     u16 vector);
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+			     u16 index, u64 desc_addr, u64 driver_addr,
+			     u64 device_addr);
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+				u16 idx, bool enable);
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+				u16 idx);
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+			      u16 idx, u16 size);
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+			     u16 idx);
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+				   u16 idx);
+void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+				       size_t minlen,
+				       u32 align,
+				       u32 start, u32 size,
+				       size_t *len);
+int vp_modern_probe(struct virtio_pci_modern_device *mdev);
+void vp_modern_remove(struct virtio_pci_modern_device *mdev);
+#endif
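The new virtio_pci_modern.h above packages the modern virtio PCI transport (capability mapping, common config, notify and ISR regions) as a small library around struct virtio_pci_modern_device, which PCI drivers other than virtio_pci can reuse instead of open-coding the layout. A hedged sketch of the basic call flow; the setup wrapper here is hypothetical and error handling is trimmed:

#include <linux/bits.h>
#include <linux/pci.h>
#include <linux/virtio_config.h>
#include <linux/virtio_pci_modern.h>

static int example_modern_setup(struct pci_dev *pci_dev,
				struct virtio_pci_modern_device *mdev)
{
	int err;

	mdev->pci_dev = pci_dev;

	/* Finds the virtio PCI capabilities and maps common/isr/notify/device. */
	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (!(vp_modern_get_features(mdev) & BIT_ULL(VIRTIO_F_VERSION_1))) {
		vp_modern_remove(mdev);
		return -ENODEV;
	}

	vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
				   VIRTIO_CONFIG_S_DRIVER);
	return 0;
}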
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 9ac2d2672a93..fd84adc47963 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -46,7 +46,11 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
 #if IS_ENABLED(CONFIG_NF_NAT)
 void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
 #else
-#define icmp_ndo_send icmp_send
+static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+	struct ip_options opts = { 0 };
+	__icmp_send(skb_in, type, code, info, &opts);
+}
 #endif
 
 int icmp_rcv(struct sk_buff *skb);
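Like icmpv6_ndo_send() earlier in this diff, icmp_ndo_send() stops being a bare alias for icmp_send(): it passes __icmp_send() a zeroed ip_options block, which is what a driver-context (ndo) caller wants when the skb's IP control block cannot be trusted to describe the packet being reported on. A brief, hypothetical use from a tunnel transmit routine:

#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/icmp.h>

static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (skb->len > dev->mtu) {
		/* Driver-context variant rather than icmp_send(). */
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(dev->mtu));
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Real encapsulation and transmit elided. */
	kfree_skb(skb);
	return NETDEV_TX_OK;
}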
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 76e85e16854b..c838e7ac1c2d 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -60,6 +60,51 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class,
 		),					\
 		TP_ARGS(wc, cid))
 
+DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
+	TP_PROTO(
+		const struct ib_wc *wc,
+		const struct rpc_rdma_cid *cid
+	),
+
+	TP_ARGS(wc, cid),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(int, completion_id)
+		__field(u32, received)
+		__field(unsigned long, status)
+		__field(unsigned int, vendor_err)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cid->ci_queue_id;
+		__entry->completion_id = cid->ci_completion_id;
+		__entry->status = wc->status;
+		if (wc->status) {
+			__entry->received = 0;
+			__entry->vendor_err = wc->vendor_err;
+		} else {
+			__entry->received = wc->byte_len;
+			__entry->vendor_err = 0;
+		}
+	),
+
+	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
+		__entry->cq_id, __entry->completion_id,
+		rdma_show_wc_status(__entry->status),
+		__entry->status, __entry->vendor_err,
+		__entry->received
+	)
+);
+
+#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
+		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
+				TP_PROTO(				\
+					const struct ib_wc *wc,		\
+					const struct rpc_rdma_cid *cid	\
+				),					\
+				TP_ARGS(wc, cid))
+
 DECLARE_EVENT_CLASS(xprtrdma_reply_class,
 	TP_PROTO(
 		const struct rpcrdma_rep *rep
@@ -838,7 +883,8 @@ TRACE_EVENT(xprtrdma_post_linv_err,
 ** Completion events
 **/
 
-DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
+DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
+
 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
 DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
@@ -1790,7 +1836,7 @@ TRACE_EVENT(svcrdma_post_recv,
 	)
 );
 
-DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
+DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
 
 TRACE_EVENT(svcrdma_rq_post_err,
 	TP_PROTO(
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
new file mode 100644
index 000000000000..66a41e4ec163
--- /dev/null
+++ b/include/uapi/linux/vdpa.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * vdpa device management interface
+ * Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_VDPA_H_
+#define _UAPI_LINUX_VDPA_H_
+
+#define VDPA_GENL_NAME "vdpa"
+#define VDPA_GENL_VERSION 0x1
+
+enum vdpa_command {
+	VDPA_CMD_UNSPEC,
+	VDPA_CMD_MGMTDEV_NEW,
+	VDPA_CMD_MGMTDEV_GET,		/* can dump */
+	VDPA_CMD_DEV_NEW,
+	VDPA_CMD_DEV_DEL,
+	VDPA_CMD_DEV_GET,		/* can dump */
+};
+
+enum vdpa_attr {
+	VDPA_ATTR_UNSPEC,
+
+	/* bus name (optional) + dev name together make the parent device handle */
+	VDPA_ATTR_MGMTDEV_BUS_NAME,		/* string */
+	VDPA_ATTR_MGMTDEV_DEV_NAME,		/* string */
+	VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,	/* u64 */
+
+	VDPA_ATTR_DEV_NAME,			/* string */
+	VDPA_ATTR_DEV_ID,			/* u32 */
+	VDPA_ATTR_DEV_VENDOR_ID,		/* u32 */
+	VDPA_ATTR_DEV_MAX_VQS,			/* u32 */
+	VDPA_ATTR_DEV_MAX_VQ_SIZE,		/* u16 */
+
+	/* new attributes must be added above here */
+	VDPA_ATTR_MAX,
+};
+
+#endif