Diffstat (limited to 'include/linux')
234 files changed, 15682 insertions, 1867 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f102c0fe3431..1c5bb1e887cd 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -759,13 +759,13 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
 #endif
 
 #ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER
-static inline void acpi_arch_set_root_pointer(u64 addr)
+static __always_inline void acpi_arch_set_root_pointer(u64 addr)
 {
 }
 #endif
 
 #ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
-static inline u64 acpi_arch_get_root_pointer(void)
+static __always_inline u64 acpi_arch_get_root_pointer(void)
 {
 	return 0;
 }
@@ -1503,7 +1503,7 @@ int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
 #else
 static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
 {
-	return 0;
+	return -ENODEV;
 }
 #endif
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 062fbd4c9b77..8cced632ecd0 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -12,20 +12,6 @@
 struct amd_iommu;
 
-/*
- * This is mainly used to communicate information back-and-forth
- * between SVM and IOMMU for setting up and tearing down posted
- * interrupt
- */
-struct amd_iommu_pi_data {
-	u32 ga_tag;
-	u32 prev_ga_tag;
-	u64 base;
-	bool is_guest_mode;
-	struct vcpu_data *vcpu_data;
-	void *ir_data;
-};
-
 #ifdef CONFIG_AMD_IOMMU
 
 struct task_struct;
@@ -44,10 +30,8 @@ static inline void amd_iommu_detect(void) { }
 /* IOMMU AVIC Function */
 extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
 
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
-
-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
 extern int amd_iommu_deactivate_guest_mode(void *data);
 
 #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -58,13 +42,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
 	return 0;
 }
 
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
 {
 	return 0;
 }
 
-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
 {
 	return 0;
 }
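Editor's note: the amd_iommu_update_ga() and amd_iommu_activate_guest_mode() changes move the IRTE data pointer to the first argument and replace the old "is_run" flag with "ga_log_intr". A minimal, hypothetical caller sketch of how a KVM-style user might adapt, assuming (as the new parameters suggest) that a negative cpu marks the vCPU as not running and ga_log_intr requests a GA log interrupt for that case; the function names here are invented for illustration:

/* Hypothetical sketch, not part of this patch. */
static void example_vcpu_load(void *ir_data, int host_cpu)
{
	/* Old call: amd_iommu_update_ga(host_cpu, true, ir_data); */
	amd_iommu_update_ga(ir_data, host_cpu, false);
}

static void example_vcpu_put(void *ir_data)
{
	/* Assumed: cpu < 0 means "not running"; ask for a GA log interrupt. */
	amd_iommu_update_ga(ir_data, -1, true);
}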
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 784ebe4607a4..50b47eba7d01 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -113,7 +113,7 @@
 
 /* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
 #define ARM_SMCCC_VENDOR_HYP_UID_KVM UUID_INIT(\
-	0xb66fb428, 0xc52e, 0xe911, \
+	0x28b46fb6, 0x2ec5, 0x11e9, \
 	0xa9, 0xca, 0x4b, 0x56, \
 	0x4d, 0x00, 0x3a, 0x74)
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 0050ef288ab3..a394614ccd0b 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -417,7 +417,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
 extern void __audit_log_capset(const struct cred *new, const struct cred *old);
 extern void __audit_mmap_fd(int fd, int flags);
 extern void __audit_openat2_how(struct open_how *how);
-extern void __audit_log_kern_module(char *name);
+extern void __audit_log_kern_module(const char *name);
 extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
 extern void __audit_tk_injoffset(struct timespec64 offset);
 extern void __audit_ntp_log(const struct audit_ntp_data *ad);
@@ -519,7 +519,7 @@ static inline void audit_openat2_how(struct open_how *how)
 	__audit_openat2_how(how);
 }
 
-static inline void audit_log_kern_module(char *name)
+static inline void audit_log_kern_module(const char *name)
 {
 	if (!audit_dummy_context())
 		__audit_log_kern_module(name);
@@ -677,9 +676,8 @@ static inline void audit_mmap_fd(int fd, int flags)
 static inline void audit_openat2_how(struct open_how *how)
 { }
 
-static inline void audit_log_kern_module(char *name)
-{
-}
+static inline void audit_log_kern_module(const char *name)
+{ }
 
 static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
 { }
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index cf0afa60e4a7..5be1881abbb6 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -132,8 +132,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
 	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
 	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
-	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
-	VIRTCHNL_OP_SET_RSS_HENA = 26,
+	VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
+	VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
 	VIRTCHNL_OP_REQUEST_QUEUES = 29,
@@ -974,18 +974,19 @@ struct virtchnl_rss_lut {
 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
 #define virtchnl_rss_lut_LEGACY_SIZEOF	6
 
-/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
+/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
+ * VIRTCHNL_OP_SET_RSS_HASHCFG
+ * VF sends these messages to get and set the hash filter configuration for RSS.
  * By default, the PF sets these to all possible traffic types that the
  * hardware supports. The VF can query this value if it wants to change the
  * traffic types that are hashed by the hardware.
  */
-struct virtchnl_rss_hena {
-	u64 hena;
+struct virtchnl_rss_hashcfg {
+	/* Bits defined by enum libie_filter_pctype */
+	u64 hashcfg;
 };
 
-VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
 
 /* Type of RSS algorithm */
 enum virtchnl_rss_algorithm {
@@ -1779,10 +1780,10 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
 		valid_len = sizeof(struct virtchnl_rss_hfunc);
 		break;
-	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
 		break;
-	case VIRTCHNL_OP_SET_RSS_HENA:
-		valid_len = sizeof(struct virtchnl_rss_hena);
+	case VIRTCHNL_OP_SET_RSS_HASHCFG:
+		valid_len = sizeof(struct virtchnl_rss_hashcfg);
 		break;
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
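Editor's note: the old HENA ("hash enable") naming is replaced by HASHCFG throughout the virtchnl RSS interface, with the bit layout now tied to enum libie_filter_pctype. A minimal VF-side sketch of filling and sending the renamed message; vf_send_msg() and struct example_vf are hypothetical transport helpers, not part of this header:

/* Hypothetical sketch: set the RSS hash configuration from a VF driver. */
static int example_set_rss_hashcfg(struct example_vf *vf, u64 pctype_bits)
{
	struct virtchnl_rss_hashcfg cfg = {
		.hashcfg = pctype_bits,	/* bits from enum libie_filter_pctype */
	};

	/* The PF validates this opcode against sizeof(struct virtchnl_rss_hashcfg). */
	return vf_send_msg(vf, VIRTCHNL_OP_SET_RSS_HASHCFG,
			   (u8 *)&cfg, sizeof(cfg));
}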
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index c7eae0bfb013..e67a2b6e8f11 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -29,11 +29,13 @@ int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
 			      ssize_t bytes);
+int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+		     struct logical_block_metadata_cap __user *argp);
 
 static inline bool
 blk_integrity_queue_supports_integrity(struct request_queue *q)
 {
-	return q->limits.integrity.tuple_size;
+	return q->limits.integrity.metadata_size;
 }
 
 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
@@ -74,7 +76,7 @@ static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
 static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
 					       unsigned int sectors)
 {
-	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+	return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
 }
 
 static inline bool blk_integrity_rq(struct request *rq)
@@ -92,6 +94,11 @@ static inline struct bio_vec rq_integrity_vec(struct request *rq)
 				 rq->bio->bi_integrity->bip_iter);
 }
 #else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+				   struct logical_block_metadata_cap __user *argp)
+{
+	return -ENOIOCTLCMD;
+}
 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
 					    struct bio *b)
 {
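Editor's note: the tuple_size field becomes metadata_size (the total per-interval metadata, of which the PI tuple is only a part), and bio_integrity_bytes() scales the number of protection intervals by it. A worked sketch of the arithmetic, assuming 512-byte protection intervals and 8 bytes of metadata per interval (values chosen for illustration):

/*
 * Hypothetical example: 4 KiB of data, 512-byte intervals (interval_exp = 9),
 * metadata_size = 8.
 *
 *   intervals = 8 sectors >> (interval_exp - 9) = 8
 *   bytes     = intervals * metadata_size       = 64
 */
static unsigned int example_integrity_bytes(struct blk_integrity *bi)
{
	unsigned int sectors = 4096 >> SECTOR_SHIFT;	/* 8 sectors */

	return bio_integrity_bytes(bi, sectors);	/* 64 for the case above */
}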
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
new file mode 100644
index 000000000000..c26a01aeae00
--- /dev/null
+++ b/include/linux/blk-mq-dma.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BLK_MQ_DMA_H
+#define BLK_MQ_DMA_H
+
+#include <linux/blk-mq.h>
+#include <linux/pci-p2pdma.h>
+
+struct blk_dma_iter {
+	/* Output address range for this iteration */
+	dma_addr_t			addr;
+	u32				len;
+
+	/* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
+	blk_status_t			status;
+
+	/* Internal to blk_rq_dma_map_iter_* */
+	struct req_iterator		iter;
+	struct pci_p2pdma_map_state	p2pdma;
+};
+
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+		struct dma_iova_state *state, struct blk_dma_iter *iter);
+bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
+		struct dma_iova_state *state, struct blk_dma_iter *iter);
+
+/**
+ * blk_rq_dma_map_coalesce - were all segments coalesced?
+ * @state: DMA state to check
+ *
+ * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a
+ * single DMA range.
+ */
+static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
+{
+	return dma_use_iova(state);
+}
+
+/**
+ * blk_rq_dma_unmap - try to DMA unmap a request
+ * @req:	request to unmap
+ * @dma_dev:	device to unmap from
+ * @state:	DMA IOVA state
+ * @mapped_len: number of bytes to unmap
+ *
+ * Returns %false if the callers need to manually unmap every DMA segment
+ * mapped using @iter or %true if no work is left to be done.
+ */
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+		struct dma_iova_state *state, size_t mapped_len)
+{
+	if (req->cmd_flags & REQ_P2PDMA)
+		return true;
+
+	if (dma_use_iova(state)) {
+		dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
+				 0);
+		return true;
+	}
+
+	return !dma_need_unmap(dma_dev);
+}
+
+#endif /* BLK_MQ_DMA_H */
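Editor's note: taken together, the new header defines a start/next iterator for mapping a request for DMA without building a scatterlist. A minimal driver-side sketch of the calling convention implied by the declarations and kernel-doc above; example_prep_descriptor() is a hypothetical device-specific helper:

/* Hypothetical sketch of the intended mapping loop. */
static blk_status_t example_map_rq(struct request *req, struct device *dma_dev,
				   struct dma_iova_state *state)
{
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
		return iter.status;	/* error, or nothing to map */

	do {
		/* Program one (addr, len) DMA range into a descriptor. */
		example_prep_descriptor(req, iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));

	return iter.status;	/* BLK_STS_OK at normal loop exit */
}

On completion, blk_rq_dma_unmap() tells the driver whether teardown is already done (P2P or IOVA mapping) or whether it must still unmap each segment by hand.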
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index de8c85a03bb7..2a5a828f19a0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -947,6 +947,8 @@ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
 void blk_freeze_queue_start_non_owner(struct request_queue *q);
 
+unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
+unsigned int blk_mq_num_online_queues(unsigned int max_queues);
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
 			  struct device *dev, unsigned int offset);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3d1577f07c1c..09b99d52fd36 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -350,11 +350,11 @@ enum req_op {
 	/* Close a zone */
 	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
 	/* Transition a zone to full */
-	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
+	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)13,
 	/* reset a zone write pointer */
-	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
+	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
 	/* reset all the zone present on the device */
-	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,
+	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,
 
 	/* Driver private requests */
 	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
@@ -386,6 +386,7 @@ enum req_flag_bits {
 	__REQ_DRV,		/* for driver use */
 	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
 	__REQ_ATOMIC,		/* for atomic write operations */
+	__REQ_P2PDMA,		/* contains P2P DMA pages */
 	/*
 	 * Command specific flags, keep last:
 	 */
@@ -418,6 +419,7 @@ enum req_flag_bits {
 #define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
 #define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
 #define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)
+#define REQ_P2PDMA	(__force blk_opf_t)(1ULL << __REQ_P2PDMA)
 
 #define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 181a0deadc9e..95886b404b16 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -116,10 +116,11 @@ enum blk_integrity_checksum {
 struct blk_integrity {
 	unsigned char				flags;
 	enum blk_integrity_checksum		csum_type;
-	unsigned char				tuple_size;
+	unsigned char				metadata_size;
 	unsigned char				pi_offset;
 	unsigned char				interval_exp;
 	unsigned char				tag_size;
+	unsigned char				pi_tuple_size;
 };
 
 typedef unsigned int __bitwise blk_mode_t;
@@ -388,6 +389,9 @@ struct queue_limits {
 	unsigned int		max_user_discard_sectors;
 	unsigned int		max_secure_erase_sectors;
 	unsigned int		max_write_zeroes_sectors;
+	unsigned int		max_wzeroes_unmap_sectors;
+	unsigned int		max_hw_wzeroes_unmap_sectors;
+	unsigned int		max_user_wzeroes_unmap_sectors;
 	unsigned int		max_hw_zone_append_sectors;
 	unsigned int		max_zone_append_sectors;
 	unsigned int		discard_granularity;
@@ -842,6 +846,55 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk)
 {
 	return disk->nr_zones;
 }
+
+/**
+ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
+ *				   write plugging
+ * @bio: The BIO being submitted
+ *
+ * Return true whenever @bio execution needs to be handled through zone
+ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
+ */
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+	enum req_op op = bio_op(bio);
+
+	/*
+	 * Only zoned block devices have a zone write plug hash table. But not
+	 * all of them have one (e.g. DM devices may not need one).
+	 */
+	if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
+		return false;
+
+	/* Only write operations need zone write plugging. */
+	if (!op_is_write(op))
+		return false;
+
+	/* Ignore empty flush */
+	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+		return false;
+
+	/* Ignore BIOs that already have been handled by zone write plugging. */
+	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+		return false;
+
+	/*
+	 * All zone write operations must be handled through zone write plugging
+	 * using blk_zone_plug_bio().
+	 */
+	switch (op) {
+	case REQ_OP_ZONE_APPEND:
+	case REQ_OP_WRITE:
+	case REQ_OP_WRITE_ZEROES:
+	case REQ_OP_ZONE_FINISH:
+	case REQ_OP_ZONE_RESET:
+	case REQ_OP_ZONE_RESET_ALL:
+		return true;
+	default:
+		return false;
+	}
+}
+
 bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
 
 /**
@@ -871,6 +924,12 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk)
 {
 	return 0;
 }
+
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+	return false;
+}
+
 static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
 {
 	return false;
@@ -1047,6 +1106,7 @@ static inline void blk_queue_disable_secure_erase(struct request_queue *q)
 static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
 {
 	q->limits.max_write_zeroes_sectors = 0;
+	q->limits.max_wzeroes_unmap_sectors = 0;
 }
 
 /*
@@ -1225,15 +1285,6 @@ enum blk_default_limits {
 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
 };
 
-/*
- * Default upper limit for the software max_sectors limit used for
- * regular file system I/O. This can be increased through sysfs.
- *
- * Not to be confused with the max_hw_sector limit that is entirely
- * controlled by the driver, usually based on hardware limits.
- */
-#define BLK_DEF_MAX_SECTORS_CAP	2560u
-
 static inline struct queue_limits *bdev_limits(struct block_device *bdev)
 {
 	return &bdev_get_queue(bdev)->limits;
@@ -1383,6 +1434,12 @@ static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 	return bdev_limits(bdev)->max_write_zeroes_sectors;
 }
 
+static inline unsigned int
+bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
+{
+	return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
+}
+
 static inline bool bdev_nonrot(struct block_device *bdev)
 {
 	return blk_queue_nonrot(bdev_get_queue(bdev));
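Editor's note: bio_needs_zone_write_plugging() lets submission paths skip the zone write plug lookup for BIOs that cannot need it (reads, empty flushes, already-plugged BIOs, non-zoned disks). A sketch of how a submit path might pair it with blk_zone_plug_bio(), loosely modeled on the check described in the kernel-doc above; the surrounding functions are illustrative only:

/* Hypothetical sketch of a submission path using the new helper. */
static void example_submit_bio(struct bio *bio, unsigned int nr_segs)
{
	/*
	 * Cheap checks first; only BIOs that need plugging touch the
	 * zone write plug hash table.
	 */
	if (bio_needs_zone_write_plugging(bio) &&
	    blk_zone_plug_bio(bio, nr_segs))
		return;	/* BIO was plugged and will be resubmitted later */

	example_issue_bio(bio);	/* hypothetical driver issue path */
}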
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
new file mode 100644
index 000000000000..549231703bce
--- /dev/null
+++ b/include/linux/bnxt/hsi.h
@@ -0,0 +1,10914 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2025 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST      CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST                    0x1UL
+#define TLV_TYPE_HWRM_RESPONSE                   0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND                 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1              0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1             0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2              0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2             0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT          0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT         0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT          0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT         0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV                   0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG             0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT           0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS      0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY  0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE      0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY    0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS        0x800aUL
+#define TLV_TYPE_LAST                            TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+	__le16	cmd_discr;
+	u8	reserved_8b;
+	u8	flags;
+	#define TLV_FLAGS_MORE          0x1UL
+	#define TLV_FLAGS_MORE_LAST     0x0UL
+	#define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+	#define TLV_FLAGS_REQUIRED      0x2UL
+	#define TLV_FLAGS_REQUIRED_NO   (0x0UL << 1)
+	#define TLV_FLAGS_REQUIRED_YES  (0x1UL << 1)
+	#define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+	__le16	tlv_type;
+	__le16	length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+	__le16	req_type;
+	__le16	signature;
+	#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+	#define SHORT_REQ_SIGNATURE_LAST      SHORT_REQ_SIGNATURE_SHORT_CMD
+	__le16	target_id;
+	#define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+	#define SHORT_REQ_TARGET_ID_TOOLS   0xfffdUL
+	#define SHORT_REQ_TARGET_ID_LAST    SHORT_REQ_TARGET_ID_TOOLS
+	__le16	size;
+	__le64	req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+	__le16	req_type;
+	#define HWRM_VER_GET             0x0UL
+	#define HWRM_FUNC_ECHO_RESPONSE  0xbUL
+	#define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+	#define HWRM_FUNC_DRV_IF_CHANGE  0xdUL
+	#define HWRM_FUNC_BUF_UNRGTR     0xeUL
+	#define HWRM_FUNC_VF_CFG         0xfUL
+	#define HWRM_RESERVED1           0x10UL
+	#define HWRM_FUNC_RESET          0x11UL
+	#define HWRM_FUNC_GETFID         0x12UL
+	#define HWRM_FUNC_VF_ALLOC       0x13UL
+	#define HWRM_FUNC_VF_FREE        0x14UL
+	#define HWRM_FUNC_QCAPS          0x15UL
+	#define HWRM_FUNC_QCFG           0x16UL
+	#define HWRM_FUNC_CFG            0x17UL
+	#define HWRM_FUNC_QSTATS         0x18UL
+	#define HWRM_FUNC_CLR_STATS      0x19UL
+	#define HWRM_FUNC_DRV_UNRGTR     0x1aUL
+	#define HWRM_FUNC_VF_RESC_FREE   0x1bUL
+	#define
HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL + #define HWRM_FUNC_DRV_RGTR 0x1dUL + #define HWRM_FUNC_DRV_QVER 0x1eUL + #define HWRM_FUNC_BUF_RGTR 0x1fUL + #define HWRM_PORT_PHY_CFG 0x20UL + #define HWRM_PORT_MAC_CFG 0x21UL + #define HWRM_PORT_TS_QUERY 0x22UL + #define HWRM_PORT_QSTATS 0x23UL + #define HWRM_PORT_LPBK_QSTATS 0x24UL + #define HWRM_PORT_CLR_STATS 0x25UL + #define HWRM_PORT_LPBK_CLR_STATS 0x26UL + #define HWRM_PORT_PHY_QCFG 0x27UL + #define HWRM_PORT_MAC_QCFG 0x28UL + #define HWRM_PORT_MAC_PTP_QCFG 0x29UL + #define HWRM_PORT_PHY_QCAPS 0x2aUL + #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL + #define HWRM_PORT_PHY_I2C_READ 0x2cUL + #define HWRM_PORT_LED_CFG 0x2dUL + #define HWRM_PORT_LED_QCFG 0x2eUL + #define HWRM_PORT_LED_QCAPS 0x2fUL + #define HWRM_QUEUE_QPORTCFG 0x30UL + #define HWRM_QUEUE_QCFG 0x31UL + #define HWRM_QUEUE_CFG 0x32UL + #define HWRM_FUNC_VLAN_CFG 0x33UL + #define HWRM_FUNC_VLAN_QCFG 0x34UL + #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL + #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL + #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL + #define HWRM_QUEUE_PRI2COS_CFG 0x38UL + #define HWRM_QUEUE_COS2BW_QCFG 0x39UL + #define HWRM_QUEUE_COS2BW_CFG 0x3aUL + #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL + #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL + #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL + #define HWRM_VNIC_ALLOC 0x40UL + #define HWRM_VNIC_FREE 0x41UL + #define HWRM_VNIC_CFG 0x42UL + #define HWRM_VNIC_QCFG 0x43UL + #define HWRM_VNIC_TPA_CFG 0x44UL + #define HWRM_VNIC_TPA_QCFG 0x45UL + #define HWRM_VNIC_RSS_CFG 0x46UL + #define HWRM_VNIC_RSS_QCFG 0x47UL + #define HWRM_VNIC_PLCMODES_CFG 0x48UL + #define HWRM_VNIC_PLCMODES_QCFG 0x49UL + #define HWRM_VNIC_QCAPS 0x4aUL + #define HWRM_VNIC_UPDATE 0x4bUL + #define HWRM_RING_ALLOC 0x50UL + #define HWRM_RING_FREE 0x51UL + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL + #define HWRM_RING_AGGINT_QCAPS 0x54UL + #define HWRM_RING_SCHQ_ALLOC 0x55UL + #define HWRM_RING_SCHQ_CFG 0x56UL + #define HWRM_RING_SCHQ_FREE 0x57UL + #define HWRM_RING_RESET 0x5eUL + #define HWRM_RING_GRP_ALLOC 0x60UL + #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RING_CFG 0x62UL + #define HWRM_RING_QCFG 0x63UL + #define HWRM_RESERVED5 0x64UL + #define HWRM_RESERVED6 0x65UL + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_QUEUE_MPLS_QCAPS 0x80UL + #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL + #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL + #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL + #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL + #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL + #define HWRM_QUEUE_GLOBAL_CFG 0x86UL + #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL + #define HWRM_QUEUE_QCAPS 0x8cUL + #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL + #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL + #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL + #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL + #define HWRM_CFA_L2_FILTER_FREE 0x91UL + #define HWRM_CFA_L2_FILTER_CFG 0x92UL + #define HWRM_CFA_L2_SET_RX_MASK 0x93UL + #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL + #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL + #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL + #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL + #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL + #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL + #define 
HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL + #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL + #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL + #define HWRM_CFA_EM_FLOW_FREE 0x9dUL + #define HWRM_CFA_EM_FLOW_CFG 0x9eUL + #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL + #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL + #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL + #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL + #define HWRM_STAT_CTX_ENG_QUERY 0xafUL + #define HWRM_STAT_CTX_ALLOC 0xb0UL + #define HWRM_STAT_CTX_FREE 0xb1UL + #define HWRM_STAT_CTX_QUERY 0xb2UL + #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_PORT_QSTATS_EXT 0xb4UL + #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL + #define HWRM_PORT_PHY_MDIO_READ 0xb6UL + #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL + #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL + #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL + #define HWRM_RESERVED7 0xbaUL + #define HWRM_PORT_TX_FIR_CFG 0xbbUL + #define HWRM_PORT_TX_FIR_QCFG 0xbcUL + #define HWRM_PORT_ECN_QSTATS 0xbdUL + #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL + #define HWRM_FW_LIVEPATCH 0xbfUL + #define HWRM_FW_RESET 0xc0UL + #define HWRM_FW_QSTATUS 0xc1UL + #define HWRM_FW_HEALTH_CHECK 0xc2UL + #define HWRM_FW_SYNC 0xc3UL + #define HWRM_FW_STATE_QCAPS 0xc4UL + #define HWRM_FW_STATE_QUIESCE 0xc5UL + #define HWRM_FW_STATE_BACKUP 0xc6UL + #define HWRM_FW_STATE_RESTORE 0xc7UL + #define HWRM_FW_SET_TIME 0xc8UL + #define HWRM_FW_GET_TIME 0xc9UL + #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL + #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL + #define HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_FW_ECN_CFG 0xcdUL + #define HWRM_FW_ECN_QCFG 0xceUL + #define HWRM_FW_SECURE_CFG 0xcfUL + #define HWRM_EXEC_FWD_RESP 0xd0UL + #define HWRM_REJECT_FWD_RESP 0xd1UL + #define HWRM_FWD_RESP 0xd2UL + #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_OEM_CMD 0xd4UL + #define HWRM_PORT_PRBS_TEST 0xd5UL + #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL + #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL + #define HWRM_FW_STATE_UNQUIESCE 0xd8UL + #define HWRM_PORT_DSC_DUMP 0xd9UL + #define HWRM_PORT_EP_TX_QCFG 0xdaUL + #define HWRM_PORT_EP_TX_CFG 0xdbUL + #define HWRM_PORT_CFG 0xdcUL + #define HWRM_PORT_QCFG 0xddUL + #define HWRM_PORT_MAC_QCAPS 0xdfUL + #define HWRM_TEMP_MONITOR_QUERY 0xe0UL + #define HWRM_REG_POWER_QUERY 0xe1UL + #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL + #define HWRM_REG_POWER_HISTOGRAM 0xe3UL + #define HWRM_WOL_FILTER_ALLOC 0xf0UL + #define HWRM_WOL_FILTER_FREE 0xf1UL + #define HWRM_WOL_FILTER_QCFG 0xf2UL + #define HWRM_WOL_REASON_QCFG 0xf3UL + #define HWRM_CFA_METER_QCAPS 0xf4UL + #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL + #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL + #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL + #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL + #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL + #define HWRM_CFA_VFR_ALLOC 0xfdUL + #define HWRM_CFA_VFR_FREE 0xfeUL + #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL + #define HWRM_CFA_VF_PAIR_FREE 0x101UL + #define HWRM_CFA_VF_PAIR_INFO 0x102UL + #define HWRM_CFA_FLOW_ALLOC 0x103UL + #define HWRM_CFA_FLOW_FREE 0x104UL + #define HWRM_CFA_FLOW_FLUSH 0x105UL + #define HWRM_CFA_FLOW_STATS 0x106UL + #define HWRM_CFA_FLOW_INFO 0x107UL + #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL + #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL + #define HWRM_CFA_PAIR_ALLOC 0x10dUL + #define HWRM_CFA_PAIR_FREE 0x10eUL + #define HWRM_CFA_PAIR_INFO 
0x10fUL + #define HWRM_FW_IPC_MSG 0x110UL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL + #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL + #define HWRM_CFA_FLOW_AGING_CFG 0x114UL + #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL + #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL + #define HWRM_CFA_CTX_MEM_RGTR 0x117UL + #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL + #define HWRM_CFA_CTX_MEM_QCTX 0x119UL + #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL + #define HWRM_CFA_COUNTER_QCAPS 0x11bUL + #define HWRM_CFA_COUNTER_CFG 0x11cUL + #define HWRM_CFA_COUNTER_QCFG 0x11dUL + #define HWRM_CFA_COUNTER_QSTATS 0x11eUL + #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL + #define HWRM_CFA_EEM_QCAPS 0x120UL + #define HWRM_CFA_EEM_CFG 0x121UL + #define HWRM_CFA_EEM_QCFG 0x122UL + #define HWRM_CFA_EEM_OP 0x123UL + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL + #define HWRM_CFA_TFLIB 0x125UL + #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL + #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL + #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL + #define HWRM_CFA_TLS_FILTER_FREE 0x129UL + #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL + #define HWRM_ENGINE_CKV_STATUS 0x12eUL + #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL + #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL + #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL + #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL + #define HWRM_ENGINE_CKV_FLUSH 0x133UL + #define HWRM_ENGINE_CKV_RNG_GET 0x134UL + #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL + #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL + #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL + #define HWRM_ENGINE_QG_QUERY 0x13dUL + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL + #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL + #define HWRM_ENGINE_QG_METER_QUERY 0x142UL + #define HWRM_ENGINE_QG_METER_BIND 0x143UL + #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL + #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL + #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL + #define HWRM_ENGINE_SG_QUERY 0x147UL + #define HWRM_ENGINE_SG_METER_QUERY 0x148UL + #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL + #define HWRM_ENGINE_SG_QG_BIND 0x14aUL + #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL + #define HWRM_ENGINE_CONFIG_QUERY 0x154UL + #define HWRM_ENGINE_STATS_CONFIG 0x155UL + #define HWRM_ENGINE_STATS_CLEAR 0x156UL + #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL + #define HWRM_ENGINE_RQ_ALLOC 0x15eUL + #define HWRM_ENGINE_RQ_FREE 0x15fUL + #define HWRM_ENGINE_CQ_ALLOC 0x160UL + #define HWRM_ENGINE_CQ_FREE 0x161UL + #define HWRM_ENGINE_NQ_ALLOC 0x162UL + #define HWRM_ENGINE_NQ_FREE 0x163UL + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_ENGINE_FUNC_QCFG 0x165UL + #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL + #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL + #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL + #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL + #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL + #define HWRM_FUNC_VF_BW_CFG 0x195UL + #define HWRM_FUNC_VF_BW_QCFG 0x196UL + #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL + #define HWRM_FUNC_QSTATS_EXT 0x198UL + #define HWRM_STAT_EXT_CTX_QUERY 0x199UL + #define HWRM_FUNC_SPD_CFG 0x19aUL + #define HWRM_FUNC_SPD_QCFG 0x19bUL + #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL + #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL + #define HWRM_FUNC_PTP_CFG 0x19eUL + #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL + 
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL + #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL + #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL + #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL + #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL + #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL + #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL + #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL + #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL + #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL + #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL + #define HWRM_FUNC_SYNCE_CFG 0x1abUL + #define HWRM_FUNC_SYNCE_QCFG 0x1acUL + #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL + #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL + #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL + #define HWRM_FUNC_LAG_CREATE 0x1b0UL + #define HWRM_FUNC_LAG_UPDATE 0x1b1UL + #define HWRM_FUNC_LAG_FREE 0x1b2UL + #define HWRM_FUNC_LAG_QCFG 0x1b3UL + #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL + #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL + #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL + #define HWRM_SELFTEST_QLIST 0x200UL + #define HWRM_SELFTEST_EXEC 0x201UL + #define HWRM_SELFTEST_IRQ 0x202UL + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_PCIE_QSTATS 0x204UL + #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL + #define HWRM_MFG_TIMERS_QUERY 0x206UL + #define HWRM_MFG_OTP_CFG 0x207UL + #define HWRM_MFG_OTP_QCFG 0x208UL + #define HWRM_MFG_HDMA_TEST 0x209UL + #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL + #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL + #define HWRM_MFG_SOC_IMAGE 0x20cUL + #define HWRM_MFG_SOC_QSTATUS 0x20dUL + #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL + #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL + #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL + #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL + #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL + #define HWRM_MFG_PRVSN_GET_STATE 0x213UL + #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL + #define HWRM_MFG_PSOC_QSTATUS 0x215UL + #define HWRM_MFG_SELFTEST_QLIST 0x216UL + #define HWRM_MFG_SELFTEST_EXEC 0x217UL + #define HWRM_STAT_GENERIC_QSTATS 0x218UL + #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL + #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL + #define HWRM_MFG_TESTS 0x21bUL + #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL + #define HWRM_PORT_POE_CFG 0x230UL + #define HWRM_PORT_POE_QCFG 0x231UL + #define HWRM_UDCC_QCAPS 0x258UL + #define HWRM_UDCC_CFG 0x259UL + #define HWRM_UDCC_QCFG 0x25aUL + #define HWRM_UDCC_SESSION_CFG 0x25bUL + #define HWRM_UDCC_SESSION_QCFG 0x25cUL + #define HWRM_UDCC_SESSION_QUERY 0x25dUL + #define HWRM_UDCC_COMP_CFG 0x25eUL + #define HWRM_UDCC_COMP_QCFG 0x25fUL + #define HWRM_UDCC_COMP_QUERY 0x260UL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL + #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL + #define HWRM_TF 0x2bcUL + #define HWRM_TF_VERSION_GET 0x2bdUL + #define HWRM_TF_SESSION_OPEN 0x2c6UL + #define HWRM_TF_SESSION_REGISTER 0x2c8UL + #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL + #define HWRM_TF_SESSION_CLOSE 0x2caUL + #define HWRM_TF_SESSION_QCFG 0x2cbUL + #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL + #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL + #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL + #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL + #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL + #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL + #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL + #define HWRM_TF_TBL_TYPE_GET 0x2daUL + #define HWRM_TF_TBL_TYPE_SET 0x2dbUL + #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL + #define HWRM_TF_EM_INSERT 
0x2eaUL + #define HWRM_TF_EM_DELETE 0x2ebUL + #define HWRM_TF_EM_HASH_INSERT 0x2ecUL + #define HWRM_TF_EM_MOVE 0x2edUL + #define HWRM_TF_TCAM_SET 0x2f8UL + #define HWRM_TF_TCAM_GET 0x2f9UL + #define HWRM_TF_TCAM_MOVE 0x2faUL + #define HWRM_TF_TCAM_FREE 0x2fbUL + #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL + #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL + #define HWRM_TF_IF_TBL_SET 0x2feUL + #define HWRM_TF_IF_TBL_GET 0x2ffUL + #define HWRM_TF_RESC_USAGE_SET 0x300UL + #define HWRM_TF_RESC_USAGE_QUERY 0x301UL + #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL + #define HWRM_TF_TBL_TYPE_FREE 0x303UL + #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL + #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL + #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL + #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL + #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL + #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL + #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL + #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL + #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL + #define HWRM_TFC_SESSION_FID_ADD 0x389UL + #define HWRM_TFC_SESSION_FID_REM 0x38aUL + #define HWRM_TFC_IDENT_ALLOC 0x38bUL + #define HWRM_TFC_IDENT_FREE 0x38cUL + #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL + #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL + #define HWRM_TFC_IDX_TBL_SET 0x38fUL + #define HWRM_TFC_IDX_TBL_GET 0x390UL + #define HWRM_TFC_IDX_TBL_FREE 0x391UL + #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL + #define HWRM_TFC_TCAM_SET 0x393UL + #define HWRM_TFC_TCAM_GET 0x394UL + #define HWRM_TFC_TCAM_ALLOC 0x395UL + #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL + #define HWRM_TFC_TCAM_FREE 0x397UL + #define HWRM_TFC_IF_TBL_SET 0x398UL + #define HWRM_TFC_IF_TBL_GET 0x399UL + #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL + #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL + #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL + #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL + #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL + #define HWRM_SV 0x400UL + #define HWRM_DBG_SERDES_TEST 0xff0eUL + #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL + #define HWRM_DBG_READ_DIRECT 0xff10UL + #define HWRM_DBG_READ_INDIRECT 0xff11UL + #define HWRM_DBG_WRITE_DIRECT 0xff12UL + #define HWRM_DBG_WRITE_INDIRECT 0xff13UL + #define HWRM_DBG_DUMP 0xff14UL + #define HWRM_DBG_ERASE_NVM 0xff15UL + #define HWRM_DBG_CFG 0xff16UL + #define HWRM_DBG_COREDUMP_LIST 0xff17UL + #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL + #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL + #define HWRM_DBG_FW_CLI 0xff1aUL + #define HWRM_DBG_I2C_CMD 0xff1bUL + #define HWRM_DBG_RING_INFO_GET 0xff1cUL + #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL + #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL + #define HWRM_DBG_DRV_TRACE 0xff1fUL + #define HWRM_DBG_QCAPS 0xff20UL + #define HWRM_DBG_QCFG 0xff21UL + #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL + #define HWRM_DBG_USEQ_ALLOC 0xff23UL + #define HWRM_DBG_USEQ_FREE 0xff24UL + #define HWRM_DBG_USEQ_FLUSH 0xff25UL + #define HWRM_DBG_USEQ_QCAPS 0xff26UL + #define HWRM_DBG_USEQ_CW_CFG 0xff27UL + #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL + #define HWRM_DBG_USEQ_RUN 0xff29UL + #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL + #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL + #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL + #define HWRM_DBG_PTRACE 0xff2dUL + #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL + #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL + #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL + #define HWRM_NVM_DEFRAG 0xffecUL + #define HWRM_NVM_REQ_ARBITRATION 0xffedUL + #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL + #define HWRM_NVM_VALIDATE_OPTION 0xffefUL + #define HWRM_NVM_FLUSH 0xfff0UL + #define 
HWRM_NVM_GET_VARIABLE 0xfff1UL + #define HWRM_NVM_SET_VARIABLE 0xfff2UL + #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL + #define HWRM_NVM_MODIFY 0xfff4UL + #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL + #define HWRM_NVM_GET_DEV_INFO 0xfff6UL + #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL + #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL + #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL + #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL + #define HWRM_NVM_GET_DIR_INFO 0xfffbUL + #define HWRM_NVM_RAW_DUMP 0xfffcUL + #define HWRM_NVM_READ 0xfffdUL + #define HWRM_NVM_WRITE 0xfffeUL + #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + __le16 unused_0[3]; +}; + +/* ret_codes (size:64b/8B) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS 0x0UL + #define HWRM_ERR_CODE_FAIL 0x1UL + #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL + #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL + #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL + #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL + #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL + #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL + #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL + #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL + #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL + #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL + #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_BUSY 0x10UL + #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL + #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL + #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL + #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL + #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL + #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL + #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED + __le16 unused_0[3]; +}; + +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 cmd_err; + u8 valid; +}; +#define HWRM_NA_SIGNATURE ((__le32)(-1)) +#define HWRM_MAX_REQ_LEN 128 +#define HWRM_MAX_RESP_LEN 704 +#define HW_HASH_INDEX_SIZE 0x80 +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 +#define HWRM_TARGET_ID_BONO 0xFFF8 +#define HWRM_TARGET_ID_KONG 0xFFF9 +#define HWRM_TARGET_ID_APE 0xFFFA +#define HWRM_TARGET_ID_TOOLS 0xFFFD +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 10 +#define HWRM_VERSION_UPDATE 3 +#define HWRM_VERSION_RSVD 97 +#define HWRM_VERSION_STR "1.10.3.97" + +/* hwrm_ver_get_input (size:192b/24B) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj_8b; + u8 hwrm_intf_min_8b; + u8 hwrm_intf_upd_8b; + u8 hwrm_intf_rsvd_8b; + u8 hwrm_fw_maj_8b; + u8 hwrm_fw_min_8b; + u8 hwrm_fw_bld_8b; + u8 hwrm_fw_rsvd_8b; + u8 mgmt_fw_maj_8b; + u8 mgmt_fw_min_8b; + u8 mgmt_fw_bld_8b; + u8 mgmt_fw_rsvd_8b; + u8 netctrl_fw_maj_8b; + u8 netctrl_fw_min_8b; + u8 netctrl_fw_bld_8b; + u8 netctrl_fw_rsvd_8b; + __le32 dev_caps_cfg; + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL + #define 
VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL + #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL + #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL + #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL + #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL + u8 roce_fw_maj_8b; + u8 roce_fw_min_8b; + u8 roce_fw_bld_8b; + u8 roce_fw_rsvd_8b; + char hwrm_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + char active_pkg_name[16]; + char roce_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 flags; + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL + #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL + u8 unused_0[2]; + u8 always_1; + __le16 hwrm_intf_major; + __le16 hwrm_intf_minor; + __le16 hwrm_intf_build; + __le16 hwrm_intf_patch; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 max_ext_req_len; + __le16 max_req_timeout; + u8 unused_1[3]; + u8 valid; +}; + +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + __le16 type; + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_FLAGS_MASK 0xffc0UL + #define EJECT_CMPL_FLAGS_SFT 6 + #define EJECT_CMPL_FLAGS_ERROR 0x40UL + __le16 len; + __le32 opaque; + __le16 v; + #define EJECT_CMPL_V 0x1UL + #define EJECT_CMPL_ERRORS_MASK 0xfffeUL + #define EJECT_CMPL_ERRORS_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + __le16 reserved16; + __le32 unused_2; +}; + +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + __le16 type; 
+ #define CMPL_TYPE_MASK 0x3fUL + #define CMPL_TYPE_SFT 0 + #define CMPL_TYPE_HWRM_DONE 0x20UL + #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE + __le16 sequence_id; + __le32 unused_1; + __le32 v; + #define CMPL_V 0x1UL + __le32 unused_3; +}; + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ +struct hwrm_fwd_req_cmpl { + __le16 req_len_type; + #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL + #define FWD_REQ_CMPL_TYPE_SFT 0 + #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL + #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL + #define FWD_REQ_CMPL_REQ_LEN_SFT 6 + __le16 source_id; + __le32 unused0; + __le32 req_buf_addr_v[2]; + #define FWD_REQ_CMPL_V 0x1UL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +}; + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + __le16 type; + #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL + #define FWD_RESP_CMPL_TYPE_SFT 0 + #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL + #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + __le16 source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define FWD_RESP_CMPL_V 0x1UL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + __le16 type; + #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define 
ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL + #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_V 0x1UL + #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20 +}; + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL +}; + +/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ +struct hwrm_async_event_cmpl_reset_notify { + __le16 type; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL + #define 
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY + __le32 event_data2; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 +}; + +/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_recovery { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL +}; + +/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */ +struct hwrm_async_event_cmpl_ring_monitor_msg { + __le16 type; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + 
#define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG + __le32 event_data2; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL +}; + +/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_default_vnic_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1 + u8 
timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10 +}; + +/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_flow_aged { + __le16 type; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX +}; + +/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_req { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_done { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 
0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0 +}; + +/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */ +struct hwrm_async_event_cmpl_deferred_response { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */ +struct hwrm_async_event_cmpl_echo_request { + __le16 type; + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */ +struct hwrm_async_event_cmpl_phc_update { + __le16 type; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0 + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 
0xffff0000UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4 +}; + +/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */ +struct hwrm_async_event_cmpl_pps_timestamp { + __le16 type; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP + __le32 event_data2; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1 + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0 +}; + +/* hwrm_async_event_cmpl_error_report (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 
timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0 +}; + +/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */ +struct hwrm_async_event_cmpl_dbg_buf_producer { + __le16 type; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE +}; + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + __le16 type; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + __le32 event_data2; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + u8 
opaque_v; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_base { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED +}; + +/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_pause_storm { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST 
ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM +}; + +/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_invalid_signal { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL +}; + +/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_nvm { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8) + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE +}; + +/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8 +}; + +/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_thermal { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING +}; + +/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED +}; + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + u8 func_reset_level; + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL + #define 
FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF + u8 unused_0; +}; + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + u8 unused_0[2]; +}; + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 first_vf_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_cfg_input (size:576b/72B) */ +struct hwrm_func_vf_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL + __le16 mtu; + __le16 guest_vlan; + __le16 async_event_cr; + u8 dflt_mac_addr[6]; + __le32 flags; + #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL + #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL + #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL + #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL + #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL + #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL + #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL + #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL + 
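All of the hwrm_*_input structures above open with the same five-field header (req_type, cmpl_ring, seq_id, target_id, resp_addr), and every hwrm_*_output ends with a valid byte that firmware writes last, so a caller can poll it to know the rest of the response is stable. A sketch of that convention using hwrm_func_getfid; HWRM_FUNC_GETFID is the opcode macro defined elsewhere in this header, and the 0xffff target and the helper names are assumptions, not code from the patch:

static void bnxt_prep_func_getfid(struct hwrm_func_getfid_input *req, u16 pci_id)
{
	req->req_type = cpu_to_le16(HWRM_FUNC_GETFID);
	req->target_id = cpu_to_le16(0xffff);	/* assumed: 0xffff addresses the HWRM itself */
	/* cmpl_ring, seq_id and resp_addr are normally filled in by the
	 * driver's common request machinery. */
	req->enables = cpu_to_le32(FUNC_GETFID_REQ_ENABLES_PCI_ID);
	req->pci_id = cpu_to_le16(pci_id);
}

static bool bnxt_func_getfid_ok(const struct hwrm_func_getfid_output *resp)
{
	/* valid is the last byte firmware writes; only once it is nonzero
	 * are error_code and the fid payload trustworthy. */
	return resp->valid && !le16_to_cpu(resp->error_code);
}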
#define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le16 num_msix; + u8 unused[2]; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; +}; + +/* hwrm_func_vf_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_qcaps_input (size:192b/24B) */ +struct hwrm_func_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcaps_output (size:1152b/144B) */ +struct hwrm_func_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le32 flags; + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL + u8 mac_address[6]; + __le16 max_rsscos_ctx; + __le16 max_cmpl_rings; + __le16 max_tx_rings; + __le16 max_rx_rings; + __le16 max_l2_ctxs; + __le16 max_vnics; + __le16 first_vf_id; + __le16 max_vfs; + __le16 max_stat_ctx; + __le32 max_encap_records; + __le32 max_decap_records; + __le32 max_tx_em_flows; + __le32 max_tx_wm_flows; + __le32 max_rx_em_flows; + __le32 max_rx_wm_flows; + __le32 max_mcast_filters; + __le32 max_flow_id; + __le32 max_hw_ring_grps; + __le16 max_sp_tx_rings; + 
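The qcaps response is the driver's feature-discovery surface: each flag bit gates an optional firmware capability. A trivial sketch of the probe pattern (helper name assumed), reusing the byte-order helpers from the first sketch:

static void bnxt_probe_fw_caps(const struct hwrm_func_qcaps_output *resp)
{
	u32 flags = le32_to_cpu(resp->flags);
	bool hot_reset = flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE;
	bool error_recovery = flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE;
	bool ptp = flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED;

	/* a real driver would stash these in its capability state and gate
	 * the corresponding setup paths on them */
	(void)hot_reset; (void)error_recovery; (void)ptp;
}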
__le16 max_msix_vfs; + __le32 flags_ext; + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL + u8 max_schqs; + u8 mpc_chnls_cap; + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL + __le16 max_key_ctxs_alloc; + __le32 flags_ext2; + #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 
0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL + __le16 tunnel_disable_flag; + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL + __le16 xid_partition_cap; + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL + u8 device_serial_number[8]; + __le16 ctxs_per_partition; + __le16 max_tso_segs; + __le32 roce_vf_max_av; + __le32 roce_vf_max_cq; + __le32 roce_vf_max_mrw; + __le32 roce_vf_max_qp; + __le32 roce_vf_max_srq; + __le32 roce_vf_max_gid; + __le32 flags_ext3; + #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL + __le16 max_roce_vfs; + __le16 max_crypto_rx_flow_filters; + u8 unused_3[3]; + u8 valid; +}; + +/* hwrm_func_qcfg_input (size:192b/24B) */ +struct hwrm_func_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcfg_output (size:1344b/168B) */ +struct hwrm_func_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le16 vlan; + __le16 flags; + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL + #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL + #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL + #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define 
FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL + #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL + #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL + #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL + #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL + #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL + #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL + #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL + #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL + u8 mac_address[6]; + __le16 pci_id; + __le16 alloc_rsscos_ctx; + __le16 alloc_cmpl_rings; + __le16 alloc_tx_rings; + __le16 alloc_rx_rings; + __le16 alloc_l2_ctx; + __le16 alloc_vnics; + __le16 admin_mtu; + __le16 mru; + __le16 stat_ctx_id; + u8 port_partition_type; + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN + u8 port_pf_cnt; + #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL + #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL + __le16 dflt_vnic_id; + __le16 max_mtu_configured; + __le32 min_bw; + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID + u8 evb_mode; + #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL + 
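The min_bw/max_bw words above are a packed encoding: a 28-bit value, a scale bit selecting bits vs. bytes, and a 3-bit unit field whose values are defined pre-shifted. A decode sketch under the assumption that BASE/KILO/MEGA/GIGA are decimal SI multipliers; the percentage and invalid encodings are left to the caller, and bnxt_qcfg_bw_to_bps() is a hypothetical name:

static u64 bnxt_qcfg_bw_to_bps(u32 bw)
{
	u64 val = bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK;
	u64 mult;

	switch (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) {
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE: mult = 1ULL; break;
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO: mult = 1000ULL; break;
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA: mult = 1000000ULL; break;
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA: mult = 1000000000ULL; break;
	default:
		return 0;	/* PERCENT1_100 or INVALID: not a plain rate */
	}

	/* SCALE selects whether the 28-bit value counts bits or bytes. */
	if ((bw & FUNC_QCFG_RESP_MIN_BW_SCALE) == FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES)
		val *= 8;

	return val * mult;
}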
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL + #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL + #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA + u8 options; + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4 + __le16 alloc_vfs; + __le32 alloc_mcast_filters; + __le32 alloc_hw_ring_grps; + __le16 alloc_sp_tx_rings; + __le16 alloc_stat_ctx; + __le16 alloc_msix; + __le16 registered_vfs; + __le16 l2_doorbell_bar_size_kb; + u8 active_endpoints; + u8 always_1; + __le32 reset_addr_poll; + __le16 legacy_l2_db_size_kb; + __le16 svif_info; + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0 + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL + u8 mpc_chnls; + #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL + #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL + #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL + #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL + #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL + u8 db_page_size; + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB + __le16 roce_vnic_id; + __le32 partition_min_bw; + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 + __le32 partition_max_bw; + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28) + #define 
FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 + __le16 host_mtu; + __le16 flags2; + #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL + __le16 stag_vid; + u8 port_kdnet_mode; + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED + u8 kdnet_pcie_function; + __le16 port_kdnet_fid; + u8 unused_5; + u8 roce_bidi_opt_mode; + #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL + #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL + #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + u8 lag_id; + u8 parif; + u8 fw_lag_id; + u8 unused_6; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; + __le32 roce_max_av_per_vf; + __le32 roce_max_cq_per_vf; + __le32 roce_max_mrw_per_vf; + __le32 roce_max_qp_per_vf; + __le32 roce_max_srq_per_vf; + __le32 roce_max_gid_per_vf; + __le16 xid_partition_cfg; + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL + __le16 mirror_vnic_id; + u8 unused_7[7]; + u8 valid; +}; + +/* hwrm_func_cfg_input (size:1280b/160B) */ +struct hwrm_func_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 num_msix; + __le32 flags; + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL + #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL + #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2 + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL + #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL + #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL + #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL + #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL + #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL + #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL + #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL + #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL + #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL + #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL + #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL + #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL + #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL + #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL + #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL + __le32 enables; + #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + 
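hwrm_func_cfg uses the same enables convention as hwrm_func_vf_cfg above: firmware applies only the fields whose bit is set in enables, so a single attribute can be changed without disturbing the rest. A sketch with a hypothetical helper name; a real driver derives mru from the MTU plus L2/VLAN overhead rather than reusing the value verbatim:

static void bnxt_func_cfg_set_mtu(struct hwrm_func_cfg_input *req, u16 mtu)
{
	u32 enables = le32_to_cpu(req->enables);

	enables |= FUNC_CFG_REQ_ENABLES_ADMIN_MTU | FUNC_CFG_REQ_ENABLES_MRU;
	req->enables = cpu_to_le32(enables);

	req->admin_mtu = cpu_to_le16(mtu);
	req->mru = cpu_to_le16(mtu);	/* simplified; see note above */
}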
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL + #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL + #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL + #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL + #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL + #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL + #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL + #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL + #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL + __le16 admin_mtu; + __le16 mru; + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 dflt_mac_addr[6]; + __le16 dflt_vlan; + __be32 dflt_ip_addr[4]; + __le32 min_bw; + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define 
FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + __le16 async_event_cr; + u8 vlan_antispoof_mode; + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN + u8 allowed_vlan_pris; + u8 evb_mode; + #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL + #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL + #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL + #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA + u8 options; + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 + __le16 num_mcast_filters; + __le16 schq_id; + __le16 mpc_chnls; + #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL + #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL + #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL + #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL + #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL + #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL + #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL + #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL + #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL + #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL + __le32 partition_min_bw; + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 + __le32 partition_max_bw; + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES + #define 
FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 + __be16 tpid; + __le16 host_mtu; + __le32 flags2; + #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL + #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL + __le32 enables2; + #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL + #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL + #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL + #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL + #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL + u8 port_kdnet_mode; + #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL + #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL + #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED + u8 db_page_size; + #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB + __le16 physical_slot_number; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; + __le32 roce_max_av_per_vf; + __le32 roce_max_cq_per_vf; + __le32 roce_max_mrw_per_vf; + __le32 roce_max_qp_per_vf; + __le32 roce_max_srq_per_vf; + __le32 roce_max_gid_per_vf; + __le16 xid_partition_cfg; + #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL + __le16 unused_2; +}; + +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_cfg_cmd_err (size:64b/8B) */ +struct hwrm_func_cfg_cmd_err { + u8 code; + #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL + #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT + u8 unused_0[7]; +}; + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 flags; + #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL + u8 unused_0[5]; +}; + +/* 
hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 clear_seq; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_func_qstats_ext_input (size:256b/32B) */ +struct hwrm_func_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 flags; + #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL + u8 unused_0[1]; + __le32 enables; + #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL + __le16 schq_id; + __le16 traffic_class; + u8 unused_1[4]; +}; + +/* hwrm_func_qstats_ext_output (size:1536b/192B) */ +struct hwrm_func_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + __le64 rx_tpa_events; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_clr_stats_input (size:192b/24B) */ +struct hwrm_func_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct hwrm_func_drv_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL + #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL + #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL + #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL + #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL + 
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL + __le32 enables; + #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL + #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL + #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL + __le16 os_type; + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le32 timestamp; + u8 unused_1[4]; + __le32 vf_req_fwd[8]; + __le32 async_event_fwd[8]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; +}; + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct hwrm_func_drv_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL + #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL + __le16 vf_id; + __le16 req_buf_num_pages; + __le16 req_buf_page_size; + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G + __le16 req_buf_len; + __le16 resp_buf_len; + u8 unused_0[2]; + __le64 req_buf_page_addr0; + __le64 req_buf_page_addr1; + __le64 req_buf_page_addr2; + __le64 req_buf_page_addr3; + __le64 req_buf_page_addr4; + __le64 req_buf_page_addr5; + __le64 req_buf_page_addr6; + __le64 req_buf_page_addr7; + __le64 req_buf_page_addr8; + __le64 req_buf_page_addr9; + __le64 error_buf_addr; + __le64 resp_buf_addr; +}; + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 reserved; + __le16 fid; + u8 driver_type; + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE + u8 unused_0; +}; + +/* hwrm_func_drv_qver_output (size:256b/32B) */ +struct hwrm_func_drv_qver_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 os_type; + #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_resource_qcaps_input (size:192b/24B) */ +struct hwrm_func_resource_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_resource_qcaps_output (size:704b/88B) */ +struct hwrm_func_resource_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 max_vfs; + __le16 max_msix; + __le16 vf_reservation_strategy; + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 max_tx_scheduler_inputs; + __le16 flags; + #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */ +struct hwrm_func_vf_resource_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 max_msix; + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 
min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 flags; + #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; +}; + +/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */ +struct hwrm_func_vf_resource_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 reserved_rsscos_ctx; + __le16 reserved_cmpl_rings; + __le16 reserved_tx_rings; + __le16 reserved_rx_rings; + __le16 reserved_l2_ctxs; + __le16 reserved_vnics; + __le16 reserved_stat_ctx; + __le16 reserved_hw_ring_grps; + __le32 reserved_ktls_tx_key_ctxs; + __le32 reserved_ktls_rx_key_ctxs; + __le32 reserved_quic_tx_key_ctxs; + __le32 reserved_quic_rx_key_ctxs; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */ +struct hwrm_func_backing_store_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 qp_max_entries; + __le16 qp_min_qp1_entries; + __le16 qp_max_l2_entries; + __le16 qp_entry_size; + __le16 srq_max_l2_entries; + __le32 srq_max_entries; + __le16 srq_entry_size; + __le16 cq_max_l2_entries; + __le32 cq_max_entries; + __le16 cq_entry_size; + __le16 vnic_max_vnic_entries; + __le16 vnic_max_ring_table_entries; + __le16 vnic_entry_size; + __le32 stat_max_entries; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le32 tqm_min_entries_per_ring; + __le32 tqm_max_entries_per_ring; + __le32 mrav_max_entries; + __le16 mrav_entry_size; + __le16 tim_entry_size; + __le32 tim_max_entries; + __le16 mrav_num_entries_units; + u8 tqm_entries_multiple; + u8 ctx_kind_initializer; + __le16 ctx_init_mask; + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL + u8 qp_init_offset; + u8 srq_init_offset; + u8 cq_init_offset; + u8 vnic_init_offset; + u8 tqm_fp_rings_count; + u8 stat_init_offset; + u8 mrav_init_offset; + u8 tqm_fp_rings_count_ext; + u8 tkc_init_offset; + u8 rkc_init_offset; + __le16 tkc_entry_size; + __le16 rkc_entry_size; + __le32 tkc_max_entries; + __le32 rkc_max_entries; + __le16 fast_qpmd_qp_num_entries; + u8 rsvd1[5]; + u8 valid; +}; + +/* tqm_fp_ring_cfg (size:128b/16B) */ +struct tqm_fp_ring_cfg { + u8 tqm_ring_pg_size_tqm_ring_lvl; + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0 + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL + #define 
TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4 + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G + u8 unused[3]; + __le32 tqm_ring_num_entries; + __le64 tqm_ring_page_dir; +}; + +/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */ +struct hwrm_func_backing_store_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL + __le32 enables; + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST 
FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 + #define 
FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 
4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + 
#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le32 cq_num_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le16 qp_entry_size; + __le16 srq_num_l2_entries; + __le16 srq_entry_size; + __le16 cq_num_l2_entries; + __le16 cq_entry_size; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le16 vnic_entry_size; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le16 mrav_entry_size; + __le16 tim_entry_size; + u8 tqm_ring8_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL + #define 
FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G + u8 ring8_unused[3]; + __le32 tqm_ring8_num_entries; + __le64 tqm_ring8_page_dir; + u8 tqm_ring9_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G + u8 ring9_unused[3]; + __le32 tqm_ring9_num_entries; + __le64 tqm_ring9_page_dir; + u8 tqm_ring10_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G + u8 ring10_unused[3]; + __le32 tqm_ring10_num_entries; + __le64 tqm_ring10_page_dir; + __le32 tkc_num_entries; + __le32 rkc_num_entries; + __le64 tkc_page_dir; + __le64 rkc_page_dir; + __le16 tkc_entry_size; + __le16 rkc_entry_size; + u8 tkc_pg_size_tkc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G + u8 rkc_pg_size_rkc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G + __le16 qp_num_fast_qpmd_entries; +}; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_error_recovery_qcfg_input (size:192b/24B) */ +struct hwrm_error_recovery_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */ +struct hwrm_error_recovery_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL + __le32 driver_polling_freq; + __le32 master_func_wait_period; + __le32 normal_func_wait_period; + __le32 master_func_wait_period_after_reset; + __le32 max_bailout_time_after_reset; + __le32 fw_health_status_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL + #define 
ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2 + __le32 fw_heartbeat_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2 + __le32 fw_reset_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg; + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg_mask; + u8 unused_0[3]; + u8 reg_array_cnt; + __le32 reset_reg[16]; + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 + #define 
ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2 + __le32 reset_reg_val[16]; + u8 delay_after_reset[16]; + __le32 err_recovery_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2 + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_func_echo_response_input (size:192b/24B) */ +struct hwrm_func_echo_response_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 event_data1; + __le32 event_data2; +}; + +/* hwrm_func_echo_response_output (size:128b/16B) */ +struct hwrm_func_echo_response_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */ +struct hwrm_func_ptp_pin_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */ +struct hwrm_func_ptp_pin_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_pins; + u8 state; + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL + u8 pin0_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT + u8 pin1_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT + u8 pin2_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 pin3_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL + 
#define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 unused_0; + u8 valid; +}; + +/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */ +struct hwrm_func_ptp_pin_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL + u8 pin0_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED + u8 pin0_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT + u8 pin1_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED + u8 pin1_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT + u8 pin2_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED + u8 pin2_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 pin3_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED + u8 pin3_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL + #define 
FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 unused_0[4]; +}; + +/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */ +struct hwrm_func_ptp_pin_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_cfg_input (size:384b/48B) */ +struct hwrm_func_ptp_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL + u8 ptp_pps_event; + #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL + #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL + u8 ptp_freq_adj_dll_source; + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID + u8 ptp_freq_adj_dll_phase; + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M + u8 unused_0[3]; + __le32 ptp_freq_adj_ext_period; + __le32 ptp_freq_adj_ext_up; + __le32 ptp_freq_adj_ext_phase_lower; + __le32 ptp_freq_adj_ext_phase_upper; + __le64 ptp_set_time; +}; + +/* hwrm_func_ptp_cfg_output (size:128b/16B) */ +struct hwrm_func_ptp_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_ts_query_input (size:192b/24B) */ +struct hwrm_func_ptp_ts_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL + #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL + u8 unused_0[4]; +}; + +/* hwrm_func_ptp_ts_query_output (size:320b/40B) */ +struct hwrm_func_ptp_ts_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 pps_event_ts; + __le64 ptm_local_ts; + __le64 ptm_system_ts; + __le32 ptm_link_delay; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */ 
+struct hwrm_func_ptp_ext_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL + __le16 phc_master_fid; + __le16 phc_sec_fid; + u8 phc_sec_mode; + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY + u8 unused_0; + __le32 failover_timer; + u8 unused_1[4]; +}; + +/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */ +struct hwrm_func_ptp_ext_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */ +struct hwrm_func_ptp_ext_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */ +struct hwrm_func_ptp_ext_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 phc_master_fid; + __le16 phc_sec_fid; + __le16 phc_active_fid0; + __le16 phc_active_fid1; + __le32 last_failover_event; + __le16 from_fid; + __le16 to_fid; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */ +struct hwrm_func_backing_store_cfg_v2_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL + #define 
FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID + __le16 instance; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL + __le64 page_dir; + __le32 num_entries; + __le16 entry_size; + u8 page_size_pbl_level; + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0 + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G + u8 subtype_valid_cnt; + __le32 split_entry_0; + __le32 split_entry_1; + __le32 split_entry_2; + __le32 split_entry_3; + __le32 enables; + #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL + __le32 next_bs_offset; +}; + +/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_v2_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 rsvd0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */ +struct hwrm_func_backing_store_qcfg_v2_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define 
FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID + __le16 instance; + u8 rsvd[4]; +}; + +/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */ +struct hwrm_func_backing_store_qcfg_v2_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 type; + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + __le16 instance; + __le32 flags; + __le64 page_dir; + __le32 num_entries; + u8 page_size_pbl_level; + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 + #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G + u8 subtype_valid_cnt; + u8 rsvd[2]; + __le32 split_entry_0; + __le32 split_entry_1; + __le32 split_entry_2; + __le32 split_entry_3; + u8 rsvd2[7]; + u8 valid; +}; + +/* qpc_split_entries (size:128b/16B) */ +struct qpc_split_entries { + __le32 qp_num_l2_entries; + __le32 qp_num_qp1_entries; + __le32 qp_num_fast_qpmd_entries; + __le32 rsvd; +}; + +/* srq_split_entries (size:128b/16B) */ +struct srq_split_entries { + __le32 srq_num_l2_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* cq_split_entries (size:128b/16B) */ +struct cq_split_entries { + __le32 cq_num_l2_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* vnic_split_entries (size:128b/16B) */ +struct vnic_split_entries { + __le32 vnic_num_vnic_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* mrav_split_entries (size:128b/16B) */ +struct mrav_split_entries { + __le32 mrav_num_av_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* ts_split_entries (size:128b/16B) */ +struct ts_split_entries { + __le32 region_num_entries; + u8 tsid; + u8 lkup_static_bkt_cnt_exp[2]; + u8 locked; + __le32 rsvd2[2]; +}; + +/* ck_split_entries (size:128b/16B) */ +struct ck_split_entries { + __le32 num_quic_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */ +struct hwrm_func_backing_store_qcaps_v2_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define 
FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID + u8 rsvd[6]; +}; + +/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */ +struct hwrm_func_backing_store_qcaps_v2_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 type; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID + __le16 entry_size; + __le32 flags; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL + #define 
FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL + __le32 instance_bit_map; + u8 ctx_init_value; + u8 ctx_init_offset; + u8 entry_multiple; + u8 rsvd; + __le32 max_num_entries; + __le32 min_num_entries; + __le16 next_valid_type; + u8 subtype_valid_cnt; + u8 exact_cnt_bit_map; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4 + __le32 split_entry_0; + __le32 split_entry_1; + __le32 split_entry_2; + __le32 split_entry_3; + __le16 max_instance_count; + u8 rsvd3; + u8 valid; +}; + +/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */ +struct hwrm_func_dbr_pacing_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */ +struct hwrm_func_dbr_pacing_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL + u8 unused_0[7]; + __le32 dbr_stat_db_fifo_reg; + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2 + __le32 dbr_stat_db_fifo_reg_watermark_mask; + u8 dbr_stat_db_fifo_reg_watermark_shift; + u8 unused_1[3]; + __le32 dbr_stat_db_fifo_reg_fifo_room_mask; + u8 dbr_stat_db_fifo_reg_fifo_room_shift; + u8 unused_2[3]; + __le32 dbr_throttling_aeq_arm_reg; + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2 + u8 dbr_throttling_aeq_arm_reg_val; + u8 unused_3[3]; + __le32 dbr_stat_db_max_fifo_depth; + __le32 primary_nq_id; + __le32 pacing_threshold; + u8 unused_4[7]; + u8 valid; +}; + +/* 
hwrm_func_drv_if_change_input (size:192b/24B) */ +struct hwrm_func_drv_if_change_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL + __le32 unused; +}; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_input (size:512b/64B) */ +struct hwrm_port_phy_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL + __le32 enables; + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL + __le16 port_id; + __le16 force_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 
0xfaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK + u8 auto_duplex; + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH + u8 auto_pause; + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + u8 mgmt_flag; + #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL + #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL + __le16 auto_link_speed; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + u8 wirespeed; + #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL + #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL + #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL + u8 unused_1; + __le32 preemphasis; + __le16 eee_link_speed_mask; + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define 
PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 force_pam4_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB + __le32 tx_lpi_timer; + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 + __le16 auto_link_pam4_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL + __le16 force_link_speeds2; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 + __le16 auto_link_speeds2_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL + u8 unused_2[6]; +}; + +/* hwrm_port_phy_cfg_output (size:128b/16B) */ +struct hwrm_port_phy_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + u8 code; + #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL + #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL + #define PORT_PHY_CFG_CMD_ERR_CODE_LAST 
PORT_PHY_CFG_CMD_ERR_CODE_RETRY + u8 unused_0[7]; +}; + +/* hwrm_port_phy_qcfg_input (size:192b/24B) */ +struct hwrm_port_phy_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcfg_output (size:832b/104B) */ +struct hwrm_port_phy_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK + u8 active_fec_signal_mode; + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0 + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4 + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE + __le16 link_speed; + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL + u8 pause; + #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL + __le16 support_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + #define 
PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL + __le16 force_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK + u8 auto_pause; + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + __le16 auto_link_speed; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL + #define 
PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL + 
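A note on the packed `active_fec_signal_mode` byte earlier in this response: the low nibble carries the signalling mode and the high nibble the active FEC, per the SIGNAL_MODE and ACTIVE_FEC masks defined above. A decode sketch (the helper name is illustrative, not part of the interface; the ACTIVE_FEC value constants are already defined pre-shifted, so the masked value is compared directly):

	/* Sketch: split active_fec_signal_mode from hwrm_port_phy_qcfg_output
	 * into signalling mode (low nibble) and active FEC (high nibble).
	 */
	static bool bnxt_link_fec_is_clause74(u8 active_fec_signal_mode)
	{
		u8 sig_mode = active_fec_signal_mode & PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
		u8 fec = active_fec_signal_mode & PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;

		/* e.g. NRZ signalling with Base-R (Clause 74) FEC active */
		return sig_mode == PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ &&
		       fec == PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE;
	}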
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL + u8 eee_config_phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define 
PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL + u8 option_flags; + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL + #define 
PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le16 support_pam4_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL + __le16 force_pam4_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB + __le16 auto_pam4_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL + u8 link_partner_pam4_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL + u8 link_down_reason; + #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL + __le16 support_speeds2; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL + __le16 force_link_speeds2; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 + __le16 auto_link_speeds2; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL + #define 
PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL + u8 active_lanes; + u8 valid; +}; + +/* hwrm_port_mac_cfg_input (size:448b/56B) */ +struct hwrm_port_mac_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL + #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL + #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL + __le32 enables; + #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL + #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL + #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL + #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL + #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL + #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL + #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL + #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL + #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL + #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL + __le16 port_id; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 reserved1; + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST 
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+	u8 unused_0[3];
+	__le32 ptp_freq_adj_ppb;
+	u8 unused_1[3];
+	u8 ptp_load_control;
+	#define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+	#define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+	#define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+	#define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+	__le64 ptp_adj_phase;
+};
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 mru;
+	__le16 mtu;
+	u8 ipg;
+	u8 lpbk;
+	#define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+	#define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+	#define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+	#define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+	u8 unused_0;
+	u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 flags;
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+	u8 unused_0[3];
+	__le32 rx_ts_reg_off_lower;
+	__le32 rx_ts_reg_off_upper;
+	__le32 rx_ts_reg_off_seq_id;
+	__le32 rx_ts_reg_off_src_id_0;
+	__le32 rx_ts_reg_off_src_id_1;
+	__le32 rx_ts_reg_off_src_id_2;
+	__le32 rx_ts_reg_off_domain_id;
+	__le32 rx_ts_reg_off_fifo;
+	__le32 rx_ts_reg_off_fifo_adv;
+	__le32 rx_ts_reg_off_granularity;
+	__le32 tx_ts_reg_off_lower;
+	__le32 tx_ts_reg_off_upper;
+	__le32 tx_ts_reg_off_seq_id;
+	__le32 tx_ts_reg_off_fifo;
+	__le32 tx_ts_reg_off_granularity;
+	__le32 ts_ref_clock_reg_lower;
+	__le32 ts_ref_clock_reg_upper;
+	u8 unused_1[7];
+	u8 valid;
+};
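The hwrm_port_mac_ptp_qcfg response publishes raw register offsets rather than values; when the direct-access flags are set, the host reads the timestamp and reference-clock registers itself. A hedged kernel-style sketch of reading the free-running PHC through those offsets (the rollover re-read loop is illustrative, not taken from the bnxt driver):

/* Sketch only: `bar` is assumed to be the mapped register window
 * the offsets are relative to.
 */
static u64 phc_read_sketch(void __iomem *bar,
			   const struct hwrm_port_mac_ptp_qcfg_output *resp)
{
	u32 lo, hi, hi2;

	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK))
		return 0;	/* offsets below are not usable */
	do {
		hi  = readl(bar + le32_to_cpu(resp->ts_ref_clock_reg_upper));
		lo  = readl(bar + le32_to_cpu(resp->ts_ref_clock_reg_lower));
		hi2 = readl(bar + le32_to_cpu(resp->ts_ref_clock_reg_upper));
	} while (hi != hi2);	/* guard against 32-bit rollover between reads */
	return ((u64)hi << 32) | lo;
}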
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+	__le64 tx_64b_frames;
+	__le64 tx_65b_127b_frames;
+	__le64 tx_128b_255b_frames;
+	__le64 tx_256b_511b_frames;
+	__le64 tx_512b_1023b_frames;
+	__le64 tx_1024b_1518b_frames;
+	__le64 tx_good_vlan_frames;
+	__le64 tx_1519b_2047b_frames;
+	__le64 tx_2048b_4095b_frames;
+	__le64 tx_4096b_9216b_frames;
+	__le64 tx_9217b_16383b_frames;
+	__le64 tx_good_frames;
+	__le64 tx_total_frames;
+	__le64 tx_ucast_frames;
+	__le64 tx_mcast_frames;
+	__le64 tx_bcast_frames;
+	__le64 tx_pause_frames;
+	__le64 tx_pfc_frames;
+	__le64 tx_jabber_frames;
+	__le64 tx_fcs_err_frames;
+	__le64 tx_control_frames;
+	__le64 tx_oversz_frames;
+	__le64 tx_single_dfrl_frames;
+	__le64 tx_multi_dfrl_frames;
+	__le64 tx_single_coll_frames;
+	__le64 tx_multi_coll_frames;
+	__le64 tx_late_coll_frames;
+	__le64 tx_excessive_coll_frames;
+	__le64 tx_frag_frames;
+	__le64 tx_err;
+	__le64 tx_tagged_frames;
+	__le64 tx_dbl_tagged_frames;
+	__le64 tx_runt_frames;
+	__le64 tx_fifo_underruns;
+	__le64 tx_pfc_ena_frames_pri0;
+	__le64 tx_pfc_ena_frames_pri1;
+	__le64 tx_pfc_ena_frames_pri2;
+	__le64 tx_pfc_ena_frames_pri3;
+	__le64 tx_pfc_ena_frames_pri4;
+	__le64 tx_pfc_ena_frames_pri5;
+	__le64 tx_pfc_ena_frames_pri6;
+	__le64 tx_pfc_ena_frames_pri7;
+	__le64 tx_eee_lpi_events;
+	__le64 tx_eee_lpi_duration;
+	__le64 tx_llfc_logical_msgs;
+	__le64 tx_hcfc_msgs;
+	__le64 tx_total_collisions;
+	__le64 tx_bytes;
+	__le64 tx_xthol_frames;
+	__le64 tx_stat_discard;
+	__le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+	__le64 rx_64b_frames;
+	__le64 rx_65b_127b_frames;
+	__le64 rx_128b_255b_frames;
+	__le64 rx_256b_511b_frames;
+	__le64 rx_512b_1023b_frames;
+	__le64 rx_1024b_1518b_frames;
+	__le64 rx_good_vlan_frames;
+	__le64 rx_1519b_2047b_frames;
+	__le64 rx_2048b_4095b_frames;
+	__le64 rx_4096b_9216b_frames;
+	__le64 rx_9217b_16383b_frames;
+	__le64 rx_total_frames;
+	__le64 rx_ucast_frames;
+	__le64 rx_mcast_frames;
+	__le64 rx_bcast_frames;
+	__le64 rx_fcs_err_frames;
+	__le64 rx_ctrl_frames;
+	__le64 rx_pause_frames;
+	__le64 rx_pfc_frames;
+	__le64 rx_unsupported_opcode_frames;
+	__le64 rx_unsupported_da_pausepfc_frames;
+	__le64 rx_wrong_sa_frames;
+	__le64 rx_align_err_frames;
+	__le64 rx_oor_len_frames;
+	__le64 rx_code_err_frames;
+	__le64 rx_false_carrier_frames;
+	__le64 rx_ovrsz_frames;
+	__le64 rx_jbr_frames;
+	__le64 rx_mtu_err_frames;
+	__le64 rx_match_crc_frames;
+	__le64 rx_promiscuous_frames;
+	__le64 rx_tagged_frames;
+	__le64 rx_double_tagged_frames;
+	__le64 rx_trunc_frames;
+	__le64 rx_good_frames;
+	__le64 rx_pfc_xon2xoff_frames_pri0;
+	__le64 rx_pfc_xon2xoff_frames_pri1;
+	__le64 rx_pfc_xon2xoff_frames_pri2;
+	__le64 rx_pfc_xon2xoff_frames_pri3;
+	__le64 rx_pfc_xon2xoff_frames_pri4;
+	__le64 rx_pfc_xon2xoff_frames_pri5;
+	__le64 rx_pfc_xon2xoff_frames_pri6;
+	__le64 rx_pfc_xon2xoff_frames_pri7;
+	__le64 rx_pfc_ena_frames_pri0;
+	__le64 rx_pfc_ena_frames_pri1;
+	__le64 rx_pfc_ena_frames_pri2;
+	__le64 rx_pfc_ena_frames_pri3;
+	__le64 rx_pfc_ena_frames_pri4;
+	__le64 rx_pfc_ena_frames_pri5;
+	__le64 rx_pfc_ena_frames_pri6;
+	__le64 rx_pfc_ena_frames_pri7;
+	__le64 rx_sch_crc_err_frames;
+	__le64 rx_undrsz_frames;
+	__le64 rx_frag_frames;
+	__le64 rx_eee_lpi_events;
+	__le64 rx_eee_lpi_duration;
+	__le64 rx_llfc_physical_msgs;
+	__le64 rx_llfc_logical_msgs;
+	__le64 rx_llfc_msgs_with_crc_err;
+	__le64 rx_hcfc_msgs;
+	__le64 rx_hcfc_msgs_with_crc_err;
+	__le64 rx_bytes;
+	__le64 rx_runt_bytes;
+	__le64 rx_runt_frames;
+	__le64 rx_stat_discard;
+	__le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 flags;
+	#define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0[5];
+	__le64 tx_stat_host_addr;
+	__le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tx_stat_size;
+	__le16 rx_stat_size;
+	u8 flags;
+	#define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+	u8 unused_0[2];
+	u8 valid;
+};
+
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+	__le64 tx_bytes_cos0;
+	__le64 tx_bytes_cos1;
+	__le64 tx_bytes_cos2;
+	__le64 tx_bytes_cos3;
+	__le64 tx_bytes_cos4;
+	__le64 tx_bytes_cos5;
+	__le64 tx_bytes_cos6;
+	__le64 tx_bytes_cos7;
+	__le64 tx_packets_cos0;
+	__le64 tx_packets_cos1;
+	__le64 tx_packets_cos2;
+	__le64 tx_packets_cos3;
+	__le64 tx_packets_cos4;
+	__le64 tx_packets_cos5;
+	__le64 tx_packets_cos6;
+	__le64 tx_packets_cos7;
+	__le64 pfc_pri0_tx_duration_us;
+	__le64 pfc_pri0_tx_transitions;
+	__le64 pfc_pri1_tx_duration_us;
+	__le64 pfc_pri1_tx_transitions;
+	__le64 pfc_pri2_tx_duration_us;
+	__le64 pfc_pri2_tx_transitions;
+	__le64 pfc_pri3_tx_duration_us;
+	__le64 pfc_pri3_tx_transitions;
+	__le64 pfc_pri4_tx_duration_us;
+	__le64 pfc_pri4_tx_transitions;
+	__le64 pfc_pri5_tx_duration_us;
+	__le64 pfc_pri5_tx_transitions;
+	__le64 pfc_pri6_tx_duration_us;
+	__le64 pfc_pri6_tx_transitions;
+	__le64 pfc_pri7_tx_duration_us;
+	__le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+	__le64 link_down_events;
+	__le64 continuous_pause_events;
+	__le64 resume_pause_events;
+	__le64 continuous_roce_pause_events;
+	__le64 resume_roce_pause_events;
+	__le64 rx_bytes_cos0;
+	__le64 rx_bytes_cos1;
+	__le64 rx_bytes_cos2;
+	__le64 rx_bytes_cos3;
+	__le64 rx_bytes_cos4;
+	__le64 rx_bytes_cos5;
+	__le64 rx_bytes_cos6;
+	__le64 rx_bytes_cos7;
+	__le64 rx_packets_cos0;
+	__le64 rx_packets_cos1;
+	__le64 rx_packets_cos2;
+	__le64 rx_packets_cos3;
+	__le64 rx_packets_cos4;
+	__le64 rx_packets_cos5;
+	__le64 rx_packets_cos6;
+	__le64 rx_packets_cos7;
+	__le64 pfc_pri0_rx_duration_us;
+	__le64 pfc_pri0_rx_transitions;
+	__le64 pfc_pri1_rx_duration_us;
+	__le64 pfc_pri1_rx_transitions;
+	__le64 pfc_pri2_rx_duration_us;
+	__le64 pfc_pri2_rx_transitions;
+	__le64 pfc_pri3_rx_duration_us;
+	__le64 pfc_pri3_rx_transitions;
+	__le64 pfc_pri4_rx_duration_us;
+	__le64 pfc_pri4_rx_transitions;
+	__le64 pfc_pri5_rx_duration_us;
+	__le64 pfc_pri5_rx_transitions;
+	__le64 pfc_pri6_rx_duration_us;
+	__le64 pfc_pri6_rx_transitions;
+	__le64 pfc_pri7_rx_duration_us;
+	__le64 pfc_pri7_rx_transitions;
+	__le64 rx_bits;
+	__le64 rx_buffer_passed_threshold;
+	__le64 rx_pcs_symbol_err;
+	__le64 rx_corrected_bits;
+	__le64 rx_discard_bytes_cos0;
+	__le64 rx_discard_bytes_cos1;
+	__le64 rx_discard_bytes_cos2;
+	__le64 rx_discard_bytes_cos3;
+	__le64 rx_discard_bytes_cos4;
+	__le64 rx_discard_bytes_cos5;
+	__le64 rx_discard_bytes_cos6;
+	__le64 rx_discard_bytes_cos7;
+	__le64 rx_discard_packets_cos0;
+	__le64 rx_discard_packets_cos1;
+	__le64 rx_discard_packets_cos2;
+	__le64 rx_discard_packets_cos3;
+	__le64 rx_discard_packets_cos4;
+	__le64 rx_discard_packets_cos5;
+	__le64 rx_discard_packets_cos6;
+	__le64 rx_discard_packets_cos7;
+	__le64 rx_fec_corrected_blocks;
+	__le64 rx_fec_uncorrectable_blocks;
+	__le64 rx_filter_miss;
+	__le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 tx_stat_size;
+	__le16 rx_stat_size;
+	u8 flags;
+	#define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0;
+	__le64 tx_stat_host_addr;
+	__le64 rx_stat_host_addr;
+};
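Both hwrm_port_qstats and hwrm_port_qstats_ext are DMA-based: the host passes bus addresses and buffer sizes, and firmware writes the tx/rx stats blocks directly. A sketch of preparing the extended-stats request; the hwrm_send() call is a placeholder for whatever message transport the caller uses, not a real API:

struct hwrm_port_qstats_ext_input req = {};
struct rx_port_stats_ext *rx;
dma_addr_t map;

rx = dma_alloc_coherent(dev, sizeof(*rx), &map, GFP_KERNEL);
if (!rx)
	return -ENOMEM;
req.port_id = cpu_to_le16(port_id);
req.rx_stat_size = cpu_to_le16(sizeof(*rx));
req.rx_stat_host_addr = cpu_to_le64(map);
/* hwrm_send(&req) stands in for the real HWRM channel; on success,
 * firmware has filled *rx up to the rx_stat_size echoed back in the
 * response, e.g. rx->rx_fec_corrected_blocks.
 */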
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tx_stat_size;
+	__le16 rx_stat_size;
+	__le16 total_active_cos_queues;
+	u8 flags;
+	#define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+	#define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+	u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 lpbk_stat_size;
+	u8 flags;
+	#define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0[5];
+	__le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 lpbk_stat_size;
+	u8 unused_0[5];
+	u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+	__le64 lpbk_ucast_frames;
+	__le64 lpbk_mcast_frames;
+	__le64 lpbk_bcast_frames;
+	__le64 lpbk_ucast_bytes;
+	__le64 lpbk_mcast_bytes;
+	__le64 lpbk_bcast_bytes;
+	__le64 lpbk_tx_discards;
+	__le64 lpbk_tx_errors;
+	__le64 lpbk_rx_discards;
+	__le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 ecn_stat_buf_size;
+	u8 flags;
+	#define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0[3];
+	__le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 ecn_stat_buf_size;
+	u8 mark_en;
+	u8 unused_0[4];
+	u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+	__le64 mark_cnt_cos0;
+	__le64 mark_cnt_cos1;
+	__le64 mark_cnt_cos2;
+	__le64 mark_cnt_cos3;
+	__le64 mark_cnt_cos4;
+	__le64 mark_cnt_cos5;
+	__le64 mark_cnt_cos6;
+	__le64 mark_cnt_cos7;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 flags;
+	#define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+	u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+	#define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+	#define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+	#define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+	#define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+	__le16 port_id;
+	u8 unused_0[2];
+	__le16 enables;
+	#define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+	#define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+	#define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+	__le16 ts_req_timeout;
+	__le32 ptp_seq_id;
+	__le16 ptp_hdr_offset;
+	u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 ptp_msg_ts;
+	__le16 ptp_msg_seqid;
+	u8 unused_0[5];
+	u8 valid;
+};
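hwrm_port_ts_query retrieves a captured PTP timestamp; the optional enables let the caller match a specific PTP sequence number and bound how long firmware waits for the capture. A sketch of requesting the TX timestamp of one event message (all field values are illustrative):

struct hwrm_port_ts_query_input req = {};

req.flags = cpu_to_le32(PORT_TS_QUERY_REQ_FLAGS_PATH_TX);
req.port_id = cpu_to_le16(port_id);
req.enables = cpu_to_le16(PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID |
			  PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT);
req.ptp_seq_id = cpu_to_le32(seq_id);	/* sequenceId of the PTP message */
req.ts_req_timeout = cpu_to_le16(1);	/* timeout units per firmware spec */
/* on success the response's ptp_msg_ts carries the hardware timestamp
 * and ptp_msg_seqid echoes the matched sequence number
 */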
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 flags;
+	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+	u8 port_cnt;
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+	__le16 supported_speeds_force_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+	__le16 supported_speeds_auto_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+	__le16 supported_speeds_eee_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+	__le32 tx_lpi_timer_low;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+	#define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+	__le32 valid_tx_lpi_timer_high;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+	#define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+	__le16 supported_pam4_speeds_auto_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+	__le16 supported_pam4_speeds_force_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+	__le16 flags2;
+	#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+	u8 internal_port_cnt;
+	u8 unused_0;
+	__le16 supported_speeds2_force_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+	__le16 supported_speeds2_auto_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+	u8 unused_1[3];
+	u8 valid;
+};
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	__le32 enables;
+	#define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+	#define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+	__le16 port_id;
+	u8 i2c_slave_addr;
+	u8 bank_number;
+	__le16 page_number;
+	__le16 page_offset;
+	u8 data_length;
+	u8 unused_1[7];
+	__le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	__le32 enables;
+	#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+	#define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+	__le16 port_id;
+	u8 i2c_slave_addr;
+	u8 bank_number;
+	__le16 page_number;
+	__le16 page_offset;
+	u8 data_length;
+	u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 data[16];
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 unused_0[2];
+	__le16 port_id;
+	u8 phy_addr;
+	u8 dev_addr;
+	__le16 reg_addr;
+	__le16 reg_data;
+	u8 cl45_mdio;
+	u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 unused_0[2];
+	__le16 port_id;
+	u8 phy_addr;
+	u8 dev_addr;
+	__le16 reg_addr;
+	u8 cl45_mdio;
+	u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 reg_data;
+	u8 unused_0[5];
+	u8 valid;
+};
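The i2c read/write messages tunnel SFP/QSFP module EEPROM access through firmware; data_length is in bytes and the payload lives in the 16-dword data[] array. A sketch of reading the first 32 bytes of a module's lower page (0xa0 is the conventional SFP EEPROM address, not something this header mandates):

struct hwrm_port_phy_i2c_read_input req = {};

req.port_id = cpu_to_le16(port_id);
req.i2c_slave_addr = 0xa0;	/* conventional SFP module EEPROM address */
req.page_number = cpu_to_le16(0);
req.enables = cpu_to_le32(PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
req.page_offset = cpu_to_le16(0);
req.data_length = 32;		/* bytes; bounded by the 64-byte data[] window */
/* on completion, the response's data[16] holds the EEPROM bytes */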
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+	__le16 port_id;
+	u8 num_leds;
+	u8 rsvd;
+	u8 led0_id;
+	u8 led0_state;
+	#define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+	u8 led0_color;
+	#define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+	u8 unused_0;
+	__le16 led0_blink_on;
+	__le16 led0_blink_off;
+	u8 led0_group_id;
+	u8 rsvd0;
+	u8 led1_id;
+	u8 led1_state;
+	#define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+	u8 led1_color;
+	#define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+	u8 unused_1;
+	__le16 led1_blink_on;
+	__le16 led1_blink_off;
+	u8 led1_group_id;
+	u8 rsvd1;
+	u8 led2_id;
+	u8 led2_state;
+	#define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+	u8 led2_color;
+	#define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+	u8 unused_2;
+	__le16 led2_blink_on;
+	__le16 led2_blink_off;
+	u8 led2_group_id;
+	u8 rsvd2;
+	u8 led3_id;
+	u8 led3_state;
+	#define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+	u8 led3_color;
+	#define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+	u8 unused_3;
+	__le16 led3_blink_on;
+	__le16 led3_blink_off;
+	u8 led3_group_id;
+	u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
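A typical use of hwrm_port_led_cfg is "ethtool -p"-style port identification: blink one LED with explicit on/off intervals, touching only the fields selected in enables. A sketch (the interval units for led0_blink_on/off are whatever the firmware spec defines; 500 is illustrative):

struct hwrm_port_led_cfg_input req = {};

req.port_id = cpu_to_le16(port_id);
req.num_leds = 1;
req.enables = cpu_to_le32(PORT_LED_CFG_REQ_ENABLES_LED0_ID |
			  PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
			  PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
			  PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF);
req.led0_id = led_id;	/* discovered via hwrm_port_led_qcaps */
req.led0_state = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
req.led0_blink_on = cpu_to_le16(500);
req.led0_blink_off = cpu_to_le16(500);
/* restore by resending with led0_state = PORT_LED_CFG_REQ_LED0_STATE_DEFAULT */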
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 num_leds;
+	u8 led0_id;
+	u8 led0_type;
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+	u8 led0_state;
+	#define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+	u8 led0_color;
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+	u8 unused_0;
+	__le16 led0_blink_on;
+	__le16 led0_blink_off;
+	u8 led0_group_id;
+	u8 led1_id;
+	u8 led1_type;
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+	u8 led1_state;
+	#define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+	u8 led1_color;
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+	u8 unused_1;
+	__le16 led1_blink_on;
+	__le16 led1_blink_off;
+	u8 led1_group_id;
+	u8 led2_id;
+	u8 led2_type;
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+	u8 led2_state;
+	#define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+	u8 led2_color;
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+	u8 unused_2;
+	__le16 led2_blink_on;
+	__le16 led2_blink_off;
+	u8 led2_group_id;
+	u8 led3_id;
+	u8 led3_type;
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+	u8 led3_state;
+	#define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+	u8 led3_color;
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+	u8 unused_3;
+	__le16 led3_blink_on;
+	__le16 led3_blink_off;
+	u8 led3_group_id;
+	u8 unused_4[6];
+	u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 num_leds;
+	u8 unused[3];
+	u8 led0_id;
+	u8 led0_type;
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+	u8 led0_group_id;
+	u8 unused_0;
+	__le16 led0_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+	__le16 led0_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+	u8 led1_id;
+	u8 led1_type;
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+	u8 led1_group_id;
+	u8 unused_1;
+	__le16 led1_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+	__le16 led1_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+	u8 led2_id;
+	u8 led2_type;
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+	u8 led2_group_id;
+	u8 unused_2;
+	__le16 led2_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+	__le16 led2_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+	u8 led3_id;
+	u8 led3_type;
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+	u8 led3_group_id;
+	u8 unused_3;
+	__le16 led3_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+	__le16 led3_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+	u8 unused_4[3];
+	u8 valid;
+};
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 flags;
+	#define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+	#define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+	u8 unused_0[6];
+	u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+	__le16 port_id;
+	u8 drv_qmap_cap;
+	#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+	#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+	#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+	u8 unused_0;
+};
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 max_configurable_queues;
+	u8 max_configurable_lossless_queues;
+	u8 queue_cfg_allowed;
+	u8 queue_cfg_info;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+	u8 queue_pfcenable_cfg_allowed;
+	u8 queue_pri2cos_cfg_allowed;
+	u8 queue_cos2bw_cfg_allowed;
+	u8 queue_id0;
+	u8 queue_id0_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id1;
+	u8 queue_id1_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id2;
+	u8 queue_id2_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id3;
+	u8 queue_id3_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id4;
+	u8 queue_id4_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id5;
+	u8 queue_id5_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id6;
+	u8 queue_id6_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id7;
+	u8 queue_id7_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+	u8 queue_id0_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	char qid0_name[16];
+	char qid1_name[16];
+	char qid2_name[16];
+	char qid3_name[16];
+	char qid4_name[16];
+	char qid5_name[16];
+	char qid6_name[16];
+	char qid7_name[16];
+	u8 queue_id1_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 queue_id2_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 queue_id3_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 queue_id4_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 queue_id5_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 queue_id6_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 queue_id7_service_profile_type;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+	u8 valid;
+};
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+	#define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+	#define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+	#define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+	__le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 queue_len;
+	u8 service_profile;
+	#define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+	u8 queue_cfg_info;
+	#define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+	u8 unused_0;
+	u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+	#define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+	__le32 enables;
+	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+	__le32 queue_id;
+	__le32 dflt_len;
+	u8 service_profile;
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+	u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
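hwrm_queue_cfg writes a single queue's attributes back; only the fields flagged in enables are applied, and the PATH flags choose TX, RX, or both directions. A sketch of marking one queue lossless bidirectionally (the queue id would come from a prior hwrm_queue_qportcfg):

struct hwrm_queue_cfg_input req = {};

req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
req.queue_id = cpu_to_le32(queue_id);
req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
/* dflt_len is left untouched because ENABLES_DFLT_LEN is not set */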
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 flags;
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+	__le16 port_id;
+	u8 unused_0[2];
+};
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+	u8 port_id;
+	u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 pri0_cos_queue_id;
+	u8 pri1_cos_queue_id;
+	u8 pri2_cos_queue_id;
+	u8 pri3_cos_queue_id;
+	u8 pri4_cos_queue_id;
+	u8 pri5_cos_queue_id;
+	u8 pri6_cos_queue_id;
+	u8 pri7_cos_queue_id;
+	u8 queue_cfg_info;
+	#define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+	u8 unused_0[6];
+	u8 valid;
+};
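The per-priority PFC bits in hwrm_queue_pfcenable_cfg_input are contiguous (PRI0 = 0x1 through PRI7 = 0x80, with the watchdog bits shifted up by eight), so an 8-bit DCB pfc_en bitmap translates by shifting the PRI0 bit. A sketch:

struct hwrm_queue_pfcenable_cfg_input req = {};
u32 flags = 0;
int i;

/* pfc_en: one bit per IEEE priority, as in struct ieee_pfc */
for (i = 0; i < 8; i++)
	if (pfc_en & (1 << i))
		flags |= QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED << i;
req.flags = cpu_to_le32(flags);
req.port_id = cpu_to_le16(port_id);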
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+	__le32 enables;
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+	u8 port_id;
+	u8 pri0_cos_queue_id;
+	u8 pri1_cos_queue_id;
+	u8 pri2_cos_queue_id;
+	u8 pri3_cos_queue_id;
+	u8 pri4_cos_queue_id;
+	u8 pri5_cos_queue_id;
+	u8 pri6_cos_queue_id;
+	u8 pri7_cos_queue_id;
+	u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 queue_id0;
+	u8 unused_0;
+	__le16 unused_1;
+	__le32 queue_id0_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32 queue_id0_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8 queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+	u8 queue_id0_pri_lvl;
+	u8 queue_id0_bw_weight;
+	struct {
+		u8 queue_id;
+		__le32 queue_id_min_bw;
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+		__le32 queue_id_max_bw;
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+		u8 queue_id_tsa_assign;
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+		#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+		u8 queue_id_pri_lvl;
+		u8 queue_id_bw_weight;
+	} __packed cfg[7];
+	u8 unused_2[4];
+	u8 valid;
+};
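Each *_min_bw/*_max_bw word above packs three fields: a 28-bit value, a scale bit at bit 28 (bits vs bytes) and a 3-bit unit at bits 29-31, where the PERCENT1_100 unit appears to express the value in hundredths of a percent of link bandwidth. A hedged sketch of unpacking one response word under that reading:

static void cos2bw_unpack(u32 bw)
{
	u32 value = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK;
	u32 unit = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK;
	bool bytes = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE;

	if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100)
		pr_info("%u.%02u%% of link bandwidth\n", value / 100, value % 100);
	else
		pr_info("raw value %u, unit field 0x%x, %s-scaled\n",
			value, unit >> QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT,
			bytes ? "byte" : "bit");
}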
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; + u8 unused_2[4]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; + u8 unused_1[5]; +}; + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + u8 unused_1[4]; +}; + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + u8 unused_0[4]; +}; + +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL + #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL + __le16 virtio_net_fid; + __le16 vnic_id; +}; + +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_update_input (size:256b/32B) */ +struct hwrm_vnic_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 enables; + #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL + #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL + #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL + u8 vnic_state; + #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL + #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL + #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP + u8 metadata_format_type; + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL + 
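The *_min_bw and *_max_bw words in the cos2bw structures above all share one layout: a 28-bit bandwidth value in bits 0-27, a scale flag (bits vs. bytes) in bit 28, and a 3-bit unit code in bits 29-31. A minimal sketch of packing such a word, assuming only the request macros defined above; bnxt_compose_min_bw() is a hypothetical helper name, not part of this patch:

/* Hypothetical helper, not part of this patch: packs a bandwidth value
 * and unit code into the queue_id0_min_bw layout defined above.
 */
static u32 bnxt_compose_min_bw(u32 value, u32 unit)
{
	/* 28-bit bandwidth value in bits 0-27 */
	u32 bw = (value << QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT) &
		 QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;

	/* unit code in bits 29-31, e.g. ..._BW_VALUE_UNIT_PERCENT1_100 */
	return bw | (unit & QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK);
}

For example, a 25.00% minimum guarantee expressed in 1/100th-percent units would be bnxt_compose_min_bw(2500, QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100), stored into the request with cpu_to_le32().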
#define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 + __le16 mru; + u8 unused_1[4]; +}; + +/* hwrm_vnic_update_output (size:128b/16B) */ +struct hwrm_vnic_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_cfg_input (size:384b/48B) */ +struct hwrm_vnic_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL + #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL + #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL + #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL + #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + __le16 default_rx_ring_id; + __le16 default_cmpl_ring_id; + __le16 queue_id; + u8 rx_csum_v2_mode; + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX + u8 l2_cqe_mode; + #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL + #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL + #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED + __le32 raw_qp_id; +}; + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + u8 unused_0[4]; +}; + +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define 
VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL + #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL + #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL + #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL + #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL + #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL + #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL + #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL + __le16 max_aggs_supported; + u8 unused_1[5]; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */ +struct hwrm_vnic_tpa_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL + __le16 vnic_id; + __le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX + u8 unused_0[2]; + __le32 
max_agg_timer; + __le32 min_agg_len; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_1[4]; +}; + +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vnic_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL + #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL + __le16 max_agg_segs; + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX + __le32 max_agg_timer; + __le32 min_agg_len; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define 
VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL + __le16 vnic_id; + u8 ring_table_pair_index; + u8 hash_mode_flags; + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; + __le16 rss_ctx_idx; + u8 flags; + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL + u8 ring_select_mode; + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM + u8 unused_1[4]; +}; + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_rss_cfg_cmd_err { + u8 code; + #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY + u8 unused_0[7]; +}; + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_ctx_idx; + __le16 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 hash_type; + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL + #define 
VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL + u8 unused_0[4]; + __le32 hash_key[10]; + u8 hash_mode_flags; + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 ring_select_mode; + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM + u8 unused_1[5]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ +struct hwrm_vnic_plcmodes_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL + __le32 enables; + #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + __le16 max_bds; + u8 unused_0[4]; +}; + +/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ +struct hwrm_vnic_plcmodes_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_plcmodes_cfg_cmd_err { + u8 code; + #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL + #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD + u8 unused_0[7]; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_alloc_input (size:704b/88B) */ +struct hwrm_ring_alloc_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL + #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL + #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL + #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL + #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ + u8 cmpl_coal_cnt; + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX + __le16 flags; + #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL + #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL + #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL + #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + __le16 schq_id; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + __le16 rx_buf_size; + __le16 rx_ring_id; + __le16 nq_ring_id; + __le16 ring_arb_cfg; + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + __le16 steering_tag; + __le32 reserved3; + __le32 stat_ctx_id; + __le32 reserved4; + __le32 max_bw; + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + 
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL + #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL + #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL + #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL + #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL + u8 mpc_chnls_type; + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE + u8 rx_rate_profile_sel; + #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL + #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL + #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE + u8 unused_4; + __le64 cq_handle; +}; + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 push_buffer_index; + #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL + #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL + #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER + u8 unused_0[2]; + u8 valid; +}; + +/* hwrm_ring_free_input (size:256b/32B) */ +struct hwrm_ring_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_FREE_REQ_RING_TYPE_TX 0x1UL + #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL + #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ + u8 flags; + #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL + #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID + __le16 ring_id; + __le32 prod_idx; + __le32 opaque; + __le32 unused_1; +}; + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + 
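The ring_arb_cfg word in hwrm_ring_alloc_input above carries the arbitration policy in bits 0-3 and a policy parameter (e.g. a WFQ weight) in bits 8-15. A minimal sketch of packing it, assuming only the macros defined above; bnxt_ring_arb_cfg() is a hypothetical helper name, not part of this patch:

/* Hypothetical helper, not part of this patch: packs the ring_arb_cfg
 * word defined above (policy nibble in bits 0-3, parameter byte in
 * bits 8-15).
 */
static u16 bnxt_ring_arb_cfg(u16 policy, u16 param)
{
	return (policy & RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK) |
	       ((param << RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT) &
		RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK);
}

For example, bnxt_ring_arb_cfg(RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ, weight) selects weighted fair queuing with the given weight, stored with cpu_to_le16().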
__le16 seq_id; + __le16 resp_len; + u8 push_buffer_index; + #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL + #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL + #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER + u8 unused_0[3]; + u8 consumer_idx[3]; + u8 valid; +}; + +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cmpl_params; + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL + __le32 nq_params; + #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL + __le16 num_cmpl_dma_aggr_min; + __le16 num_cmpl_dma_aggr_max; + __le16 num_cmpl_dma_aggr_during_int_min; + __le16 num_cmpl_dma_aggr_during_int_max; + __le16 cmpl_aggr_dma_tmr_min; + __le16 cmpl_aggr_dma_tmr_max; + __le16 cmpl_aggr_dma_tmr_during_int_min; + __le16 cmpl_aggr_dma_tmr_during_int_max; + __le16 int_lat_tmr_min_min; + __le16 int_lat_tmr_min_max; + __le16 int_lat_tmr_max_min; + __le16 int_lat_tmr_max_max; + __le16 num_cmpl_aggr_int_min; + __le16 num_cmpl_aggr_int_max; + __le16 timer_units; + u8 unused_0[1]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0 + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + u8 unused_0[4]; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + __le16 num_cmpl_dma_aggr; + __le16 
num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le16 enables; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL + u8 unused_0[4]; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; + __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + u8 unused_0[4]; +}; + +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +#define DEFAULT_FLOW_ID 0xFFFFFFFFUL +#define ROCEV1_FLOW_ID 0xFFFFFFFEUL +#define ROCEV2_FLOW_ID 0xFFFFFFFDUL +#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4 + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 
0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL + u8 l2_addr[6]; + u8 num_vlans; + u8 t_num_vlans; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_1[2]; + u8 t_l2_addr[6]; + u8 unused_2[2]; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG + u8 unused_3; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_4; + __le16 dst_id; + __le16 mirror_vnic_id; + u8 pri_hint; + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN + u8 unused_5; + __le32 unused_6; + __le64 l2_filter_id_hint; +}; + +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define 
CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; +}; + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_l2_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL + __le64 l2_filter_id; + __le32 dst_id; + __le32 new_mirror_vnic_id; + __le32 prof_func; + __le32 l2_context_id; +}; + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct hwrm_cfa_l2_set_rx_mask_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define 
CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + u8 unused_0[4]; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + u8 unused_1[4]; +}; + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ +struct hwrm_cfa_l2_set_rx_mask_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; + u8 t_l3_addr_type; + u8 tunnel_type; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 tunnel_flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; +}; + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 
resp_len;
+	__le64 tunnel_filter_id;
+	__le32 flow_id;
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+	#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+	u8 ver_hlen;
+	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+	u8 tos;
+	__be16 ip_id;
+	__be16 flags_frag_offset;
+	u8 ttl;
+	u8 protocol;
+	__be32 src_ip_addr;
+	__be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+	__be32 ver_tc_flow_label;
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+	__be16 payload_len;
+	u8 next_hdr;
+	u8 ttl;
+	__be32 src_ip_addr[4];
+	__be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
+struct hwrm_cfa_encap_data_vxlan {
+	u8 src_mac_addr[6];
+	__le16 unused_0;
+	u8 dst_mac_addr[6];
+	u8 num_vlan_tags;
+	u8 unused_1;
+	__be16 ovlan_tpid;
+	__be16 ovlan_tci;
+	__be16 ivlan_tpid;
+	__be16 ivlan_tci;
+	__le32 l3[10];
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+	#define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+	__be16 src_port;
+	__be16 dst_port;
+	__be32 vni;
+	u8 hdr_rsvd0[3];
+	u8 hdr_rsvd1;
+	u8 hdr_flags;
+	u8 unused[3];
+};
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
+	u8 encap_type;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
+	u8 unused_0[3];
+	__le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 encap_record_id;
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_record_id;
+	u8 unused_0[4];
+};
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
+	__le32 enables;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
+	__le64 l2_filter_id;
+	u8 src_macaddr[6];
+	__be16 ethertype;
+	u8 ip_addr_type;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+	u8 ip_protocol;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
+	__le16 dst_id;
+	__le16 rfs_ring_tbl_idx;
+	u8 tunnel_type;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8 pri_hint;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+	__be32 src_ipaddr[4];
+	__be32 src_ipaddr_mask[4];
+	__be32 dst_ipaddr[4];
+	__be32 dst_ipaddr_mask[4];
+	__be16 src_port;
+	__be16 src_port_mask;
+	__be16 dst_port;
+	__be16 dst_port_mask;
+	__le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 ntuple_filter_id;
+	__le32 flow_id;
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+	#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+	u8 code;
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+	u8 unused_0[7];
+};
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+	__le32 flags;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+	__le64 ntuple_filter_id;
+	__le32 new_dst_id;
+	__le32 new_mirror_vnic_id;
+	__le16 new_meter_instance_id;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+	u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+	__le32 enables;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+	__be32 tunnel_id;
+	u8 tunnel_type;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8 unused_0;
+	__le16 unused_1;
+	u8 src_macaddr[6];
+	u8 unused_2[2];
+	u8 dst_macaddr[6];
+	__be16 ovlan_vid;
+	__be16 ivlan_vid;
+	__be16 t_ovlan_vid;
+	__be16 t_ivlan_vid;
+	__be16 ethertype;
+	u8 ip_addr_type;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+	u8 ip_protocol;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+	__le16 unused_3;
+	__le32 unused_4;
+	__be32 src_ipaddr[4];
+	__be32 dst_ipaddr[4];
+	__be16 src_port;
+	__be16 dst_port;
+	__le16 dst_id;
+	__le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 decap_filter_id;
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 decap_filter_id;
+	u8 unused_0[4];
+};
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 flags;
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
+	__le16 src_fid;
+	__le32 tunnel_handle;
+	__le16 action_flags;
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
+	__le16 dst_fid;
+	__be16 l2_rewrite_vlan_tpid;
+	__be16 l2_rewrite_vlan_tci;
+	__le16 act_meter_id;
+	__le16 ref_flow_handle;
+	__be16 ethertype;
+	__be16 outer_vlan_tci;
+	__be16 dmac[3];
+	__be16 inner_vlan_tci;
+	__be16 smac[3];
+	u8 ip_dst_mask_len;
+	u8 ip_src_mask_len;
+	__be32 ip_dst[4];
+	__be32 ip_src[4];
+	__be16 l4_src_port;
+	__be16 l4_src_port_mask;
+	__be16 l4_dst_port;
+	__be16 l4_dst_port_mask;
+	__be32 nat_ip_address[4];
+	__be16 l2_rewrite_dmac[3];
+	__be16 nat_port;
+	__be16 l2_rewrite_smac[3];
+	u8 ip_proto;
+	u8 tunnel_type;
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
+struct hwrm_cfa_flow_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 flow_handle;
+	u8 unused_0[2];
+	__le32 flow_id;
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+	#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
+	__le64 ext_flow_handle;
+	__le32 flow_counter_id;
+	u8 unused_1[3];
+	u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+	u8 code;
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+	#define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+	u8 unused_0[7];
+};
+
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
+struct hwrm_cfa_flow_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 flow_handle;
+	__le16 unused_0;
+	__le32 flow_counter_id;
+	__le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 packet;
+	__le64 byte;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 flow_handle;
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+	#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
+	u8 unused_0[6];
+	__le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
+struct hwrm_cfa_flow_info_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 flags;
+	#define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
+	#define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
+	u8 profile;
+	__le16 src_fid;
+	__le16 dst_fid;
+	__le16 l2_ctxt_id;
+	__le64 em_info;
+	__le64 tcam_info;
+	__le64 vfp_tcam_info;
+	__le16 ar_id;
+	__le16 flow_handle;
+	__le32 tunnel_handle;
+	__le16 flow_timer;
+	u8 unused_0[6];
+	__le32 flow_key_data[130];
+	__le32 flow_action_info[30];
+	u8 unused_1[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
+struct hwrm_cfa_flow_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 num_flows;
+	__le16 flow_handle_0;
+	__le16 flow_handle_1;
+	__le16 flow_handle_2;
+	__le16 flow_handle_3;
+	__le16 flow_handle_4;
+	__le16 flow_handle_5;
+	__le16 flow_handle_6;
+	__le16 flow_handle_7;
+	__le16 flow_handle_8;
+	__le16 flow_handle_9;
+	u8 unused_0[2];
+	__le32 flow_id_0;
+	__le32 flow_id_1;
+	__le32 flow_id_2;
+	__le32 flow_id_3;
+	__le32 flow_id_4;
+	__le32 flow_id_5;
+	__le32 flow_id_6;
+	__le32 flow_id_7;
+	__le32 flow_id_8;
+	__le32 flow_id_9;
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 packet_0;
+	__le64 packet_1;
+	__le64 packet_2;
+	__le64 packet_3;
+	__le64 packet_4;
+	__le64 packet_5;
+	__le64 packet_6;
+	__le64 packet_7;
+	__le64 packet_8;
+	__le64 packet_9;
+	__le64 byte_0;
+	__le64 byte_1;
+	__le64 byte_2;
+	__le64 byte_3;
+	__le64 byte_4;
+	__le64 byte_5;
+	__le64 byte_6;
+	__le64 byte_7;
+	__le64 byte_8;
+	__le64 byte_9;
+	__le16 flow_hits;
+	u8 unused_0[5];
+	u8 valid;
+};
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	__le16 reserved;
+	u8 unused_0[4];
+	char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 rx_cfa_code;
+	__le16 tx_cfa_action;
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	char vfr_name[32];
+	__le16 vf_id;
+	__le16 reserved;
+	u8 unused_0[4];
+};
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+	#define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+	#define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+	__le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
+struct hwrm_cfa_eem_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 flags;
+	#define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+	#define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+	#define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+	#define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
+	__le32 unused_0;
+	__le32 supported;
+	#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+	#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+	#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+	#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+	#define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
+	__le32 max_entries_supported;
+	__le16 key_entry_size;
+	__le16 record_entry_size;
+	__le16 efc_entry_size;
+	__le16 fid_entry_size;
+	u8 unused_1[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
+struct hwrm_cfa_eem_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+	#define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+	#define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+	#define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+	__le16 group_id;
+	__le16 unused_0;
+	__le32 num_entries;
+	__le32 unused_1;
+	__le16 key0_ctx_id;
+	__le16 key1_ctx_id;
+	__le16 record_ctx_id;
+	__le16 efc_ctx_id;
+	__le16 fid_ctx_id;
+	__le16 unused_2;
+	__le32 unused_3;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+	#define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+	__le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 flags;
+	#define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+	#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+	#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+	__le32 num_entries;
+	__le16 key0_ctx_id;
+	__le16 key1_ctx_id;
+	__le16 record_ctx_id;
+	__le16 efc_ctx_id;
+	__le16 fid_ctx_id;
+	u8 unused_2[5];
+	u8 valid;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+	#define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+	__le16 unused_0;
+	__le16 op;
+	#define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+	#define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+	#define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+	#define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+	#define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 flags;
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+	#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+	u8 tunnel_next_proto;
+	u8 unused_0[6];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tunnel_dst_port_id;
+	__be16 tunnel_dst_port_val;
+	u8 upar_in_use;
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+	u8 status;
+	#define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+	#define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+	u8 unused_0;
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+	u8 tunnel_next_proto;
+	__be16 tunnel_dst_port_val;
+	u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tunnel_dst_port_id;
+	u8 error_info;
+	#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+	u8 upar_in_use;
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+	#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+	u8 tunnel_next_proto;
+	__le16 tunnel_dst_port_id;
+	u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 error_info;
+	#define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+	#define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+	#define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+	#define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+	u8 unused_1[6];
+	u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_error_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_error_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 tpa_pkts;
+	__le64 tpa_bytes;
+	__le64 tpa_events;
+	__le64 tpa_aborts;
+};
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_error_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_error_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_tpa_eligible_pkt;
+	__le64 rx_tpa_eligible_bytes;
+	__le64 rx_tpa_pkt;
+	__le64 rx_tpa_bytes;
+	__le64 rx_tpa_errors;
+	__le64 rx_tpa_events;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 stats_dma_addr;
+	__le32 update_period_ms;
+	u8 stat_ctx_flags;
+	#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+	#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+	u8 unused_0;
+	__le16 stats_dma_length;
+	__le16 flags;
+	#define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+	__le16 steering_tag;
+	__le32 stat_ctx_id;
+	__le16 alloc_seq_id;
+	u8 unused_1[6];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 stat_ctx_id;
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 stat_ctx_id;
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	u8 flags;
+	#define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_error_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_error_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 rx_agg_pkts;
+	__le64 rx_agg_bytes;
+	__le64 rx_agg_events;
+	__le64 rx_agg_aborts;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	u8 flags;
+	#define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_error_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_error_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_tpa_eligible_pkt;
+	__le64 rx_tpa_eligible_bytes;
+	__le64 rx_tpa_pkt;
+	__le64 rx_tpa_bytes;
+	__le64 rx_tpa_errors;
+	__le64 rx_tpa_events;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 pcie_stat_size;
+	u8 unused_0[6];
+	__le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 pcie_stat_size;
+	u8 unused_0[5];
+	u8 valid;
+};
+
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+	__le64 pcie_pl_signal_integrity;
+	__le64 pcie_dl_signal_integrity;
+	__le64 pcie_tl_signal_integrity;
+	__le64 pcie_link_integrity;
+	__le64 pcie_tx_traffic_rate;
+	__le64 pcie_rx_traffic_rate;
+	__le64 pcie_tx_dllp_statistics;
+	__le64 pcie_rx_dllp_statistics;
+	__le64 pcie_equalization_time;
+	__le32 pcie_ltssm_histogram[4];
+	__le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+	__le64 pcie_pl_signal_integrity;
+	__le64 pcie_dl_signal_integrity;
+	__le64 pcie_tl_signal_integrity;
+	__le64 pcie_link_integrity;
+	__le64 pcie_tx_traffic_rate;
+	__le64 pcie_rx_traffic_rate;
+	__le64 pcie_tx_dllp_statistics;
+	__le64 pcie_rx_dllp_statistics;
+	__le64 pcie_equalization_time;
+	__le32 pcie_ltssm_histogram[4];
+	__le64 pcie_recovery_histogram;
+	__le32 pcie_tl_credit_nph_histogram[8];
+	__le32 pcie_tl_credit_ph_histogram[8];
+	__le32 pcie_tl_credit_pd_histogram[8];
+	__le32 pcie_cmpl_latest_times[4];
+	__le32 pcie_cmpl_longest_time;
+	__le32 pcie_cmpl_shortest_time;
+	__le32 unused_0[2];
+	__le32 pcie_cmpl_latest_headers[4][4];
+	__le32 pcie_cmpl_longest_headers[4][4];
+	__le32 pcie_cmpl_shortest_headers[4][4];
+	__le32 pcie_wr_latency_histogram[12];
+	__le32 pcie_wr_latency_all_normal_count;
+	__le32 unused_1;
+	__le64 pcie_posted_packet_count;
+	__le64 pcie_non_posted_packet_count;
+	__le64 pcie_other_packet_count;
+	__le64 pcie_blocked_packet_count;
+	__le64 pcie_cmpl_packet_count;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 generic_stat_size;
+	u8 flags;
+	#define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+	u8 unused_0[5];
+	__le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 generic_stat_size;
+	u8 unused_0[5];
+	u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+	__le64 pcie_statistics_tx_tlp;
+	__le64 pcie_statistics_rx_tlp;
+	__le64 pcie_credit_fc_hdr_posted;
+	__le64 pcie_credit_fc_hdr_nonposted;
+	__le64 pcie_credit_fc_hdr_cmpl;
+	__le64 pcie_credit_fc_data_posted;
+	__le64 pcie_credit_fc_data_nonposted;
+	__le64 pcie_credit_fc_data_cmpl;
+	__le64 pcie_credit_fc_tgt_nonposted;
+	__le64 pcie_credit_fc_tgt_data_posted;
+	__le64 pcie_credit_fc_tgt_hdr_posted;
+	__le64 pcie_credit_fc_cmpl_hdr_posted;
+	__le64 pcie_credit_fc_cmpl_data_posted;
+	__le64 pcie_cmpl_longest;
+	__le64 pcie_cmpl_shortest;
+	__le64 cache_miss_count_cfcq;
+	__le64 cache_miss_count_cfcs;
+	__le64 cache_miss_count_cfcc;
+	__le64 cache_miss_count_cfcm;
+	__le64 hw_db_recov_dbs_dropped;
+	__le64 hw_db_recov_drops_serviced;
+	__le64 hw_db_recov_dbs_recovered;
+	__le64 hw_db_recov_oo_drop_count;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+	u8 selfrst_status;
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+	#define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+	u8 host_idx;
+	u8 flags;
+	#define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+	#define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+	u8 unused_0[4];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+	#define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+	u8 unused_0[6];
+	u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+	u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+	u8 nvm_option_action_status;
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+	u8 unused_0[5];
+	u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 year;
+	#define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+	#define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+	u8 month;
+	u8 day;
+	u8 hour;
+	u8 minute;
+	u8 second;
+	u8 unused_0;
+	__le16 millisecond;
+	__le16 zone;
+	#define FW_SET_TIME_REQ_ZONE_UTC 0
+	#define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+	#define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+	u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+	__le16 struct_id;
+	#define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+	#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+	#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+	#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+	#define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+	#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+	#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+	#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+	#define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+	#define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+	#define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+	#define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
+	__le16 len;
+	u8 version;
+	#define STRUCT_HDR_VERSION_0 0x0UL
+	#define STRUCT_HDR_VERSION_1 0x1UL
+	#define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+	u8 count;
+	__le16 subtype;
+	__le16 next_offset;
+	#define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+	u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+	__be16 protocol_id;
+	u8 protocol_selector;
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+	u8 priority;
+	u8 valid;
+	u8 unused_0[3];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 src_data_addr;
+	__le16 data_len;
+	u8 hdr_cnt;
+	u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+	u8 code;
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+	u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 dest_data_addr;
+	__le16 data_len;
+	__le16 structure_id;
+	__le16 subtype;
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+	u8 count;
+	u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 hdr_cnt;
+	u8 unused_0[6];
+	u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+	u8 code;
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+	u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 fw_target;
+	#define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+	#define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+	#define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+	u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	char install_ver[32];
+	char active_ver[32];
+	__le16 status_flags;
+	#define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+	#define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+	u8 unused_0[5];
+	u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 opcode;
+	#define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+	#define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+	#define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+	u8 fw_target;
+	#define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+	#define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+	#define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+	u8 loadtype;
+	#define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+	#define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+	#define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+	u8 flags;
+	__le32 patch_len;
+	__le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+	u8 code;
+	#define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+	#define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+	u8 unused_0[7];
+};
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_request[26];
+	__le16 encap_resp_target_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_request[26];
+	__le16 encap_resp_target_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 encap_resp_target_id;
+	__le16 encap_resp_cmpl_ring;
+	__le16 encap_resp_len;
+	u8 unused_0;
+	u8 unused_1;
+	__le64 encap_resp_addr;
+	__le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 encap_async_event_target_id;
+	u8 unused_0[6];
+	__le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 temp;
+	u8 phy_temp;
+	u8 om_temp;
+	u8 flags;
+	#define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+	#define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+	#define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+	#define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+	#define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+	#define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+	u8 temp2;
+	u8 phy_temp2;
+	u8 om_temp2;
+	u8 warn_threshold;
+	u8 critical_threshold;
+	u8 fatal_threshold;
+	u8 shutdown_threshold;
+	u8 unused_0[4];
+	u8 valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	__le32 enables;
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+	__le16 port_id;
+	u8 wol_type;
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+	u8 unused_0[5];
+	u8 mac_address[6];
+	__le16 pattern_offset;
+	__le16 pattern_buf_size;
+	__le16 pattern_mask_size;
+	u8 unused_1[4];
+	__le64 pattern_buf_addr;
+	__le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 wol_filter_id;
+	u8 unused_0[6];
+	u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+	__le32 enables;
+	#define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+	__le16 port_id;
+	u8 wol_filter_id;
+	u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 handle;
+	u8 unused_0[4];
+	__le64 pattern_buf_addr;
+	__le16 pattern_buf_size;
+	u8 unused_1[6];
+	__le64 pattern_mask_addr;
+	__le16 pattern_mask_size;
+	u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 next_handle;
+	u8 wol_filter_id;
+	u8 wol_type;
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+	__le32 unused_0;
+	u8 mac_address[6];
+	__le16 pattern_offset;
+	__le16 pattern_size;
+	__le16 pattern_mask_size;
+	u8 unused_1[3];
+	u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+	__le64 wol_pkt_buf_addr;
+	__le16 wol_pkt_buf_size;
+	u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 wol_filter_id;
+	u8 wol_reason;
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+	u8 wol_pkt_len;
+	u8 unused_0[4];
+	u8 valid;
+};
+
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le32 read_addr;
+	__le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 crc32;
+	u8 unused_0[3];
+	u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	u8 unused_0[2];
+	__le32 coredump_component_disable_caps;
+	#define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+	__le32 flags;
+	#define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+	#define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+	#define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+	#define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+	#define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+	#define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+	#define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+	#define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+	u8 unused_1[3];
+	u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 flags;
+	#define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+	#define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+	#define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+	#define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+	#define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+	#define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+	__le32 coredump_component_disable_flags;
+	#define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	u8 unused_0[2];
+	__le32 coredump_size;
+	__le32 flags;
+	#define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+	#define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+	#define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+	#define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+	#define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+	#define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+	__le16 async_cmpl_ring;
+	u8 unused_2[2];
+	__le32 crashdump_size;
+	u8 unused_3[3];
+	u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 output_dest_flags;
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+	__le16 pg_size_lvl;
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+	__le32 size;
+	__le32 coredump_component_disable_flags;
+	#define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+	__le32 unused_0;
+	__le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_1[7];
+	u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+	__le16 component_id;
+	__le16 segment_id;
+	__le16 max_instances;
+	u8 version_hi;
+	u8 version_low;
+	u8 seg_flags;
+	u8 compress_flags;
+	#define SFLAG_COMPRESSED_ZLIB 0x1UL
+	u8 unused_0[2];
+	__le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le32 host_buf_len;
+	__le16 seq_no;
+	u8 flags;
+	#define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+	u8 unused_0[1];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 flags;
+	#define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+	u8 unused_0;
+	__le16 total_segments;
+	__le16 data_len;
+	u8 unused_1;
+	u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 component_id;
+	__le16 segment_id;
+	__le16 instance;
+	__le16 unused_0;
+	u8 seg_flags;
+	#define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+	#define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+	#define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+	u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 unused_0[7];
+	u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+	__le32 address;
+	__le32 flags_length;
+	#define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+	#define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+	#define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+	__le32 instance;
+	__le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le32 host_buf_len;
+	__le32 unused_0;
+	__le16 component_id;
+	__le16 segment_id;
+	__le16 instance;
+	__le16 unused_1;
+	u8 seg_flags;
+	u8 unused_2;
+	__le16 unused_3;
+	__le32 unused_4;
+	__le32 seq_no;
+	__le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 flags;
+	#define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+	u8 unused_0;
+	__le16 data_len;
+	u8 unused_1[3];
+	u8 valid;
+};
+
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 ring_type;
+	#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+	#define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+	#define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+	#define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+	#define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
+	u8 unused_0[3];
+	__le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 producer_index;
+	__le32 consumer_index;
+	__le32 cag_vector_ctrl;
+	__le16 st_tag;
+	u8 unused_0;
+	u8 valid;
+};
+
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 type;
+	
#define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE + u8 unused_1[2]; + __le32 flags; + #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL +}; + +/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */ +struct hwrm_dbg_log_buffer_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 current_buffer_offset; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct hwrm_nvm_get_dir_entries_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; +}; + +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_write_input (size:448b/56B) */ +struct hwrm_nvm_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL + #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL + #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL + __le32 dir_item_length; + __le32 offset; + __le32 len; + __le32 unused_0; +}; + +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; + u8 valid; +}; + +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { + u8 code; + #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL 
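Editorial aside: every HWRM request structure in this header opens with the same 16-byte framing (req_type, cmpl_ring, seq_id, target_id, resp_addr), and every response echoes error_code/req_type/seq_id/resp_len and ends in a valid byte that firmware writes last. The following is a minimal sketch, not part of the patch, of how a driver might populate the hwrm_nvm_read request defined above; HWRM_NVM_READ and the DMA handles are assumed names, and seq_id is left for the caller's command sequencing.

	/* Editorial sketch, not from the patch. HWRM_NVM_READ is an assumed
	 * request-type constant; resp_dma and data_dma are DMA addresses
	 * already mapped by the caller.
	 */
	static void nvm_read_fill(struct hwrm_nvm_read_input *req,
				  dma_addr_t resp_dma, dma_addr_t data_dma,
				  u16 dir_idx, u32 offset, u32 len)
	{
		req->req_type = cpu_to_le16(HWRM_NVM_READ);
		req->cmpl_ring = cpu_to_le16(0xffff);	/* no completion ring */
		req->target_id = cpu_to_le16(0xffff);	/* default target */
		req->resp_addr = cpu_to_le64(resp_dma);	/* response buffer */
		req->host_dest_addr = cpu_to_le64(data_dma); /* NVM data lands here */
		req->dir_idx = cpu_to_le16(dir_idx);
		req->offset = cpu_to_le32(offset);
		req->len = cpu_to_le32(len);
	}
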
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; +}; + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + __le16 flags; + #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL + #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT + u8 unused_0[3]; +}; + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + u8 unused_0[6]; +}; + +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info_input (size:192b/24B) */ +struct hwrm_nvm_get_dev_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL + u8 unused_0[7]; +}; + +/* hwrm_nvm_get_dev_info_output (size:768b/96B) */ +struct hwrm_nvm_get_dev_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 nvm_cfg_ver_maj; + u8 nvm_cfg_ver_min; + u8 nvm_cfg_ver_upd; + u8 flags; + #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL + char pkg_name[16]; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + __le16 srt2_fw_major; + __le16 srt2_fw_minor; + __le16 srt2_fw_build; + __le16 srt2_fw_patch; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_mod_dir_entry_input { + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 unused_0[2]; +}; + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 install_type; + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL + __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL + u8 unused_0[2]; +}; + +/* hwrm_nvm_install_update_output (size:192b/24B) */ +struct hwrm_nvm_install_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 installed_items; + u8 result; + #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL + #define 
NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED + u8 problem_item; + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE + u8 reset_required; + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { + u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT + u8 unused_0[7]; +}; + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + u8 unused_0[7]; +}; + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct hwrm_nvm_set_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define 
NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4 + #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL + u8 unused_0; +}; + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + u8 unused_0[7]; +}; + +/* hwrm_selftest_qlist_input (size:128b/16B) */ +struct hwrm_selftest_qlist_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_qlist_output (size:2240b/280B) */ +struct hwrm_selftest_qlist_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_tests; + u8 available_tests; + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 offline_tests; + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + __le16 test_timeout; + u8 unused_1[2]; + char test_name[8][32]; + u8 eyescope_target_BER_support; + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED + u8 unused_2[6]; + u8 valid; +}; + +/* hwrm_selftest_exec_input (size:192b/24B) */ +struct hwrm_selftest_exec_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL + #define 
SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[7]; +}; + +/* hwrm_selftest_exec_output (size:128b/16B) */ +struct hwrm_selftest_exec_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 requested_tests; + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 test_success; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_selftest_irq_input (size:128b/16B) */ +struct hwrm_selftest_irq_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_irq_output (size:128b/16B) */ +struct hwrm_selftest_irq_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* dbc_dbc (size:64b/8B) */ +struct dbc_dbc { + __le32 index; + #define DBC_DBC_INDEX_MASK 0xffffffUL + #define DBC_DBC_INDEX_SFT 0 + #define DBC_DBC_EPOCH 0x1000000UL + #define DBC_DBC_TOGGLE_MASK 0x6000000UL + #define DBC_DBC_TOGGLE_SFT 25 + __le32 type_path_xid; + #define DBC_DBC_XID_MASK 0xfffffUL + #define DBC_DBC_XID_SFT 0 + #define DBC_DBC_PATH_MASK 0x3000000UL + #define DBC_DBC_PATH_SFT 24 + #define DBC_DBC_PATH_ROCE (0x0UL << 24) + #define DBC_DBC_PATH_L2 (0x1UL << 24) + #define DBC_DBC_PATH_ENGINE (0x2UL << 24) + #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE + #define DBC_DBC_VALID 0x4000000UL + #define DBC_DBC_DEBUG_TRACE 0x8000000UL + #define DBC_DBC_TYPE_MASK 0xf0000000UL + #define DBC_DBC_TYPE_SFT 28 + #define DBC_DBC_TYPE_SQ (0x0UL << 28) + #define DBC_DBC_TYPE_RQ (0x1UL << 28) + #define DBC_DBC_TYPE_SRQ (0x2UL << 28) + #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_DBC_TYPE_CQ (0x4UL << 28) + #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) + #define DBC_DBC_TYPE_NQ (0xaUL << 28) + #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28) + #define DBC_DBC_TYPE_NULL (0xfUL << 28) + #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL +}; + +/* db_push_start (size:64b/8B) */ +struct db_push_start { + u64 db; + #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL + #define DB_PUSH_START_DB_INDEX_SFT 0 + #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL + #define DB_PUSH_START_DB_PI_LO_SFT 24 + #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL + #define DB_PUSH_START_DB_XID_SFT 32 + #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL + #define DB_PUSH_START_DB_PI_HI_SFT 52 + #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL + #define DB_PUSH_START_DB_TYPE_SFT 60 + #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60) 
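Editorial aside: the 64-bit doorbell described by struct dbc_dbc above packs the consumer index into the low word and xid/path/valid/type into the high word. A minimal sketch, not part of the patch, of composing a CQ "arm all" doorbell from those masks; cq_xid, cons and db_bar are illustrative names, and whether the VALID bit is required depends on the chip generation.

	/* Editorial sketch, not from the patch: low 32 bits = index,
	 * high 32 bits = xid | path | valid | type.
	 */
	static void cq_armall_example(void __iomem *db_bar, u32 cq_xid, u32 cons)
	{
		u64 db = ((u64)((cq_xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
				DBC_DBC_VALID | DBC_DBC_TYPE_CQ_ARMALL) << 32) |
			 (cons & DBC_DBC_INDEX_MASK);

		/* must reach the NIC as a single 64-bit write (64-bit builds) */
		writeq(db, db_bar);
	}
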
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60) + #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END +}; + +/* db_push_end (size:64b/8B) */ +struct db_push_end { + u64 db; + #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL + #define DB_PUSH_END_DB_INDEX_SFT 0 + #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL + #define DB_PUSH_END_DB_PI_LO_SFT 24 + #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL + #define DB_PUSH_END_DB_XID_SFT 32 + #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL + #define DB_PUSH_END_DB_PI_HI_SFT 52 + #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL + #define DB_PUSH_END_DB_PATH_SFT 56 + #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56) + #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56) + #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56) + #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE + #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL + #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL + #define DB_PUSH_END_DB_TYPE_SFT 60 + #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60) + #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60) + #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END +}; + +/* db_push_info (size:64b/8B) */ +struct db_push_info { + u32 push_size_push_index; + #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL + #define DB_PUSH_INFO_PUSH_INDEX_SFT 0 + #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL + #define DB_PUSH_INFO_PUSH_SIZE_SFT 24 + u32 reserved32; +}; + +/* fw_status_reg (size:32b/4B) */ +struct fw_status_reg { + u32 fw_status; + #define FW_STATUS_REG_CODE_MASK 0xffffUL + #define FW_STATUS_REG_CODE_SFT 0 + #define FW_STATUS_REG_CODE_READY 0x8000UL + #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY + #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL + #define FW_STATUS_REG_RECOVERABLE 0x20000UL + #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL + #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL + #define FW_STATUS_REG_SHUTDOWN 0x100000UL + #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL + #define FW_STATUS_REG_RECOVERING 0x400000UL + #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL +}; + +/* hcomm_status (size:64b/8B) */ +struct hcomm_status { + u32 sig_ver; + #define HCOMM_STATUS_VER_MASK 0xffUL + #define HCOMM_STATUS_VER_SFT 0 + #define HCOMM_STATUS_VER_LATEST 0x1UL + #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST + #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL + #define HCOMM_STATUS_SIGNATURE_SFT 8 + #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8) + #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL + u32 fw_status_loc; + #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL + #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0 + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 + #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL + #define HCOMM_STATUS_TRUE_OFFSET_SFT 2 +}; +#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL + +#endif /* _BNXT_HSI_H_ */ diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 3f4b4ac527ca..25df9260d206 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -290,7 +290,7 @@ int __init xbc_get_info(int *node_size, size_t *data_size); /* XBC cleanup data structures */ void __init _xbc_exit(bool early); -static inline void 
xbc_exit(void) +static __always_inline void xbc_exit(void) { _xbc_exit(false); } diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 0985221d5478..c9e6b26abab6 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -63,6 +63,7 @@ struct cgroup_bpf { */ struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE]; u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE]; + u64 revisions[MAX_CGROUP_BPF_ATTACH_TYPE]; /* list of cgroup shared storages */ struct list_head storages; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 70c8b94e797a..082ccd8ad96b 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -103,7 +103,6 @@ struct bpf_cgroup_storage { struct bpf_cgroup_link { struct bpf_link link; struct cgroup *cgroup; - enum bpf_attach_type type; }; struct bpf_prog_list { diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5b25d278409b..f9cd2164ed23 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1538,6 +1538,37 @@ struct btf_mod_pair { struct bpf_kfunc_desc_tab; +enum bpf_stream_id { + BPF_STDOUT = 1, + BPF_STDERR = 2, +}; + +struct bpf_stream_elem { + struct llist_node node; + int total_len; + int consumed_len; + char str[]; +}; + +enum { + /* 100k bytes */ + BPF_STREAM_MAX_CAPACITY = 100000ULL, +}; + +struct bpf_stream { + atomic_t capacity; + struct llist_head log; /* list of in-flight stream elements in LIFO order */ + + struct mutex lock; /* lock protecting backlog_{head,tail} */ + struct llist_node *backlog_head; /* list of in-flight stream elements in FIFO order */ + struct llist_node *backlog_tail; /* tail of the list above */ +}; + +struct bpf_stream_stage { + struct llist_head log; + int len; +}; + struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; @@ -1646,6 +1677,7 @@ struct bpf_prog_aux { struct work_struct work; struct rcu_head rcu; }; + struct bpf_stream stream[2]; }; struct bpf_prog { @@ -1697,11 +1729,10 @@ struct bpf_link { enum bpf_link_type type; const struct bpf_link_ops *ops; struct bpf_prog *prog; - /* whether BPF link itself has "sleepable" semantics, which can differ - * from underlying BPF program having a "sleepable" semantics, as BPF - * link's semantics is determined by target attach hook - */ - bool sleepable; + + u32 flags; + enum bpf_attach_type attach_type; + /* rcu is used before freeing, work can be used to schedule that * RCU-based freeing before that, so they never overlap */ @@ -1709,6 +1740,11 @@ struct bpf_link { struct rcu_head rcu; struct work_struct work; }; + /* whether BPF link itself has "sleepable" semantics, which can differ + * from underlying BPF program having a "sleepable" semantics, as BPF + * link's semantics is determined by target attach hook + */ + bool sleepable; }; struct bpf_link_ops { @@ -1748,7 +1784,6 @@ struct bpf_shim_tramp_link { struct bpf_tracing_link { struct bpf_tramp_link link; - enum bpf_attach_type attach_type; struct bpf_trampoline *trampoline; struct bpf_prog *tgt_prog; }; @@ -2001,11 +2036,13 @@ int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog, #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, - int cgroup_atype); + int cgroup_atype, + enum bpf_attach_type attach_type); void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); #else static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, - int cgroup_atype) + int cgroup_atype, + enum bpf_attach_type attach_type) { return -EOPNOTSUPP; } @@ -2288,6 +2325,9 @@ 
bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, return ret; } +bool bpf_jit_bypass_spec_v1(void); +bool bpf_jit_bypass_spec_v4(void); + #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); extern struct mutex bpf_stats_enabled_mutex; @@ -2314,6 +2354,7 @@ extern const struct super_operations bpf_super_ops; extern const struct file_operations bpf_map_fops; extern const struct file_operations bpf_prog_fops; extern const struct file_operations bpf_iter_fops; +extern const struct file_operations bpf_token_fops; #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ extern const struct bpf_prog_ops _name ## _prog_ops; \ @@ -2405,6 +2446,7 @@ int generic_map_delete_batch(struct bpf_map *map, struct bpf_map *bpf_map_get_curr_or_next(u32 *id); struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); + int bpf_map_alloc_pages(const struct bpf_map *map, int nid, unsigned long nr_pages, struct page **page_array); #ifdef CONFIG_MEMCG @@ -2475,22 +2517,27 @@ static inline bool bpf_allow_uninit_stack(const struct bpf_token *token) static inline bool bpf_bypass_spec_v1(const struct bpf_token *token) { - return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON); + return bpf_jit_bypass_spec_v1() || + cpu_mitigations_off() || + bpf_token_capable(token, CAP_PERFMON); } static inline bool bpf_bypass_spec_v4(const struct bpf_token *token) { - return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON); + return bpf_jit_bypass_spec_v4() || + cpu_mitigations_off() || + bpf_token_capable(token, CAP_PERFMON); } int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, - const struct bpf_link_ops *ops, struct bpf_prog *prog); + const struct bpf_link_ops *ops, struct bpf_prog *prog, + enum bpf_attach_type attach_type); void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog, - bool sleepable); + enum bpf_attach_type attach_type, bool sleepable); int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); int bpf_link_settle(struct bpf_link_primer *primer); void bpf_link_cleanup(struct bpf_link_primer *primer); @@ -2505,6 +2552,9 @@ void bpf_token_inc(struct bpf_token *token); void bpf_token_put(struct bpf_token *token); int bpf_token_create(union bpf_attr *attr); struct bpf_token *bpf_token_get_from_fd(u32 ufd); +int bpf_token_get_info_by_fd(struct bpf_token *token, + const union bpf_attr *attr, + union bpf_attr __user *uattr); bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd); bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type); @@ -2842,13 +2892,13 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog) static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, - struct bpf_prog *prog) + struct bpf_prog *prog, enum bpf_attach_type attach_type) { } static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog, - bool sleepable) + enum bpf_attach_type attach_type, bool sleepable) { } @@ -2903,6 +2953,13 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd) return ERR_PTR(-EOPNOTSUPP); } +static inline int bpf_token_get_info_by_fd(struct bpf_token *token, + const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EOPNOTSUPP; +} + static inline void 
__dev_flush(struct list_head *flush_list) { } @@ -3543,6 +3600,16 @@ bool btf_id_set_contains(const struct btf_id_set *set, u32 id); #define MAX_BPRINTF_VARARGS 12 #define MAX_BPRINTF_BUF 1024 +/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary + * arguments representation. + */ +#define MAX_BPRINTF_BIN_ARGS 512 + +struct bpf_bprintf_buffers { + char bin_args[MAX_BPRINTF_BIN_ARGS]; + char buf[MAX_BPRINTF_BUF]; +}; + struct bpf_bprintf_data { u32 *bin_args; char *buf; @@ -3550,9 +3617,33 @@ struct bpf_bprintf_data { bool get_buf; }; -int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, +int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args, u32 num_args, struct bpf_bprintf_data *data); void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); +int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs); +void bpf_put_buffers(void); + +void bpf_prog_stream_init(struct bpf_prog *prog); +void bpf_prog_stream_free(struct bpf_prog *prog); +int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len); +void bpf_stream_stage_init(struct bpf_stream_stage *ss); +void bpf_stream_stage_free(struct bpf_stream_stage *ss); +__printf(2, 3) +int bpf_stream_stage_printk(struct bpf_stream_stage *ss, const char *fmt, ...); +int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog, + enum bpf_stream_id stream_id); +int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss); + +#define bpf_stream_printk(ss, ...) bpf_stream_stage_printk(&ss, __VA_ARGS__) +#define bpf_stream_dump_stack(ss) bpf_stream_stage_dump_stack(&ss) + +#define bpf_stream_stage(ss, prog, stream_id, expr) \ + ({ \ + bpf_stream_stage_init(&ss); \ + (expr); \ + bpf_stream_stage_commit(&ss, prog, stream_id); \ + bpf_stream_stage_free(&ss); \ + }) #ifdef CONFIG_BPF_LSM void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); @@ -3588,4 +3679,8 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog) return prog->aux->func_idx != 0; } +int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep, + const char **linep, int *nump); +struct bpf_prog *bpf_prog_find_from_stack(void); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 256274acb1d8..94defa405c85 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -344,7 +344,7 @@ struct bpf_func_state { #define MAX_CALL_FRAMES 8 -/* instruction history flags, used in bpf_insn_hist_entry.flags field */ +/* instruction history flags, used in bpf_jmp_history_entry.flags field */ enum { /* instruction references stack slot through PTR_TO_STACK register; * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) @@ -366,7 +366,7 @@ enum { static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); -struct bpf_insn_hist_entry { +struct bpf_jmp_history_entry { u32 idx; /* insn idx can't be bigger than 1 million */ u32 prev_idx : 20; @@ -449,32 +449,20 @@ struct bpf_verifier_state { /* first and last insn idx of this verifier state */ u32 first_insn_idx; u32 last_insn_idx; - /* If this state is a part of states loop this field points to some - * parent of this state such that: - * - it is also a member of the same states loop; - * - DFS states traversal starting from initial state visits loop_entry - * state before this state. - * Used to compute topmost loop entry for state loops. 
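Editorial aside: the bpf_stream_stage() macro added to bpf.h above brackets an arbitrary expression between stage init, commit to the chosen stream, and free, with bpf_stream_printk()/bpf_stream_dump_stack() forwarding to the stage helpers. A minimal kernel-side usage sketch, not part of the patch; the function name and message text are illustrative.

	/* Editorial sketch, not from the patch: queue a message and a stack
	 * dump onto a program's stderr stream in one staged transaction.
	 */
	static void report_to_stderr(struct bpf_prog *prog, unsigned long ip)
	{
		struct bpf_stream_stage ss;

		bpf_stream_stage(ss, prog, BPF_STDERR, ({
			bpf_stream_printk(ss, "prog fault at ip %lx\n", ip);
			bpf_stream_dump_stack(ss);
		}));
	}
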
- * State loops might appear because of open coded iterators logic. - * See get_loop_entry() for more information. + /* if this state is a backedge state then equal_state + * records cached state to which this state is equal. */ - struct bpf_verifier_state *loop_entry; - /* Sub-range of env->insn_hist[] corresponding to this state's - * instruction history. - * Backtracking is using it to go from last to first. - * For most states instruction history is short, 0-3 instructions. + struct bpf_verifier_state *equal_state; + /* jmp history recorded from first to last. + * backtracking is using it to go from last to first. + * For most states jmp_history_cnt is [0-3]. * For loops can go up to ~40. */ - u32 insn_hist_start; - u32 insn_hist_end; + struct bpf_jmp_history_entry *jmp_history; + u32 jmp_history_cnt; u32 dfs_depth; u32 callback_unroll_depth; u32 may_goto_depth; - /* If this state was ever pointed-to by other state's loop_entry field - * this flag would be set to true. Used to avoid freeing such states - * while they are still in use. - */ - u32 used_as_loop_entry; }; #define bpf_get_spilled_reg(slot, frame, mask) \ @@ -580,7 +568,8 @@ struct bpf_insn_aux_data { u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ - bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ + bool nospec; /* do not execute this instruction speculatively */ + bool nospec_result; /* result is unsafe under speculation, nospec must follow */ bool zext_dst; /* this insn zero extends dst reg */ bool needs_zext; /* alu op needs to clear upper bits */ bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */ @@ -609,6 +598,11 @@ struct bpf_insn_aux_data { * accepts callback function as a parameter. */ bool calls_callback; + /* + * CFG strongly connected component this instruction belongs to, + * zero if it is a singleton SCC. + */ + u32 scc; /* registers alive before this instruction. */ u16 live_regs_before; }; @@ -718,6 +712,38 @@ struct bpf_idset { u32 ids[BPF_ID_MAP_SIZE]; }; +/* see verifier.c:compute_scc_callchain() */ +struct bpf_scc_callchain { + /* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */ + u32 callsites[MAX_CALL_FRAMES - 1]; + /* last frame in a chain is identified by SCC id */ + u32 scc; +}; + +/* verifier state waiting for propagate_backedges() */ +struct bpf_scc_backedge { + struct bpf_scc_backedge *next; + struct bpf_verifier_state state; +}; + +struct bpf_scc_visit { + struct bpf_scc_callchain callchain; + /* first state in current verification path that entered SCC + * identified by the callchain + */ + struct bpf_verifier_state *entry_state; + struct bpf_scc_backedge *backedges; /* list of backedges */ + u32 num_backedges; +}; + +/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc + * but having different bpf_scc_callchain->callsites. 
+ */ +struct bpf_scc_info { + u32 num_visits; + struct bpf_scc_visit visits[]; +}; + /* single container for all structs * one verifier_env per bpf_check() call */ @@ -775,9 +801,7 @@ struct bpf_verifier_env { int cur_postorder; } cfg; struct backtrack_state bt; - struct bpf_insn_hist_entry *insn_hist; - struct bpf_insn_hist_entry *cur_hist_ent; - u32 insn_hist_cap; + struct bpf_jmp_history_entry *cur_hist_ent; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ @@ -799,6 +823,7 @@ struct bpf_verifier_env { u32 longest_mark_read_walk; u32 free_list_size; u32 explored_states_size; + u32 num_backedges; bpfptr_t fd_array; /* bit mask to keep track of whether a register has been accessed @@ -816,6 +841,10 @@ struct bpf_verifier_env { char tmp_str_buf[TMP_STR_BUF_LEN]; struct bpf_insn insn_buf[INSN_BUF_SIZE]; struct bpf_insn epilogue_buf[INSN_BUF_SIZE]; + struct bpf_scc_callchain callchain_buf; + /* array of pointers to bpf_scc_info indexed by SCC id */ + struct bpf_scc_info **scc_info; + u32 scc_cnt; }; static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog) diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 028b3e00378e..15c35655f482 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -183,6 +183,12 @@ #define BCM_LED_MULTICOLOR_PROGRAM 0xa /* + * Broadcom Synchronous Ethernet Controls (expansion register 0x0E) + */ +#define BCM_EXP_SYNC_ETHERNET (MII_BCM54XX_EXP_SEL_ER + 0x0E) +#define BCM_EXP_SYNC_ETHERNET_MII_LITE BIT(11) + +/* * BCM5482: Shadow registers * Shadow values go into bits [14:10] of register 0x1c to select a shadow * register to access. diff --git a/include/linux/btf.h b/include/linux/btf.h index b2983706292f..9eda6b113f9b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -221,6 +221,9 @@ bool btf_is_vmlinux(const struct btf *btf); struct module *btf_try_get_module(const struct btf *btf); u32 btf_nr_types(const struct btf *btf); struct btf *btf_base_btf(const struct btf *btf); +bool btf_type_is_i32(const struct btf_type *t); +bool btf_type_is_i64(const struct btf_type *t); +bool btf_type_is_primitive(const struct btf_type *t); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 0029ff880e27..b16b88bfbc3e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -262,14 +262,12 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, get_block_t *get_block); int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, get_block_t *get_block); -int block_write_end(struct file *, struct address_space *, - loff_t, unsigned len, unsigned copied, - struct folio *, void *); -int generic_write_end(struct file *, struct address_space *, +int block_write_end(loff_t pos, unsigned len, unsigned copied, struct folio *); +int generic_write_end(const struct kiocb *, struct address_space *, loff_t, unsigned len, unsigned copied, struct folio *, void *); void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to); -int cont_write_begin(struct file *, struct address_space *, loff_t, +int cont_write_begin(const struct kiocb *, struct address_space *, loff_t, unsigned, struct folio **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t 
size); diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index 9b8a9c39614b..5dfdbb63b1d5 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -14,7 +14,7 @@ #define CAN_BITRATE_UNSET 0 #define CAN_BITRATE_UNKNOWN (-1U) -#define CAN_CTRLMODE_TDC_MASK \ +#define CAN_CTRLMODE_FD_TDC_MASK \ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL) /* diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 492d23bec7be..9a92cbe5b2cb 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -91,9 +91,9 @@ struct can_priv { struct can_berr_counter *bec); }; -static inline bool can_tdc_is_enabled(const struct can_priv *priv) +static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv) { - return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK); + return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK); } /* diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index fdfb61ccf55a..b907e6c2307d 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -62,7 +62,6 @@ struct cdrom_device_info { __u8 last_sense; __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ unsigned short mmc3_profile; /* current MMC3 profile */ - int (*exit)(struct cdrom_device_info *); int mrw_mode_page; bool opened_for_data; __s64 last_media_change_ms; diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 7093e1d08af0..bee606bebaca 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -277,6 +277,14 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ class_##_name##_t var __cleanup(class_##_name##_destructor) = \ class_##_name##_constructor +#define scoped_class(_name, var, args) \ + for (CLASS(_name, var)(args); \ + __guard_ptr(_name)(&var) || !__is_cond_ptr(_name); \ + ({ goto _label; })) \ + if (0) { \ +_label: \ + break; \ + } else /* * DEFINE_GUARD(name, type, lock, unlock): diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 4fc8e26914ad..fa4ffe037bc7 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -89,6 +89,9 @@ #define __no_sanitize_coverage #endif +/* Only Clang needs to disable the coverage sanitizer for kstack_erase. */ +#define __no_kstack_erase __no_sanitize_coverage + #if __has_feature(shadow_call_stack) # define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) #endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2b77d12e07b2..16755431fc11 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -424,6 +424,10 @@ struct ftrace_likely_data { # define randomized_struct_fields_end #endif +#ifndef __no_kstack_erase +# define __no_kstack_erase +#endif + #ifndef __noscs # define __noscs #endif diff --git a/include/linux/container_of.h b/include/linux/container_of.h index 713890c867be..1f6ebf27d962 100644 --- a/include/linux/container_of.h +++ b/include/linux/container_of.h @@ -14,6 +14,7 @@ * @member: the name of the member within the struct. * * WARNING: any const qualifier of @ptr is lost. + * Do not use container_of() in new code. */ #define container_of(ptr, type, member) ({ \ void *__mptr = (void *)(ptr); \ @@ -28,6 +29,8 @@ * @ptr: the pointer to the member * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. + * + * Always prefer container_of_const() instead of container_of() in new code. 
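Editorial aside: the new scoped_class() added to cleanup.h above behaves like scoped_guard() but gives the class instance a caller-visible name. A minimal usage sketch, not part of the patch, assuming the mutex guard class that DEFINE_GUARD() already provides in linux/mutex.h; struct counted is a hypothetical type.

	/* Editorial sketch, not from the patch: the lock is held for exactly
	 * this block and released on any exit path, including break.
	 */
	struct counted {
		struct mutex lock;
		int count;
	};

	static void bump(struct counted *c)
	{
		scoped_class(mutex, m, &c->lock) {
			c->count++;
		}
		/* mutex_unlock() has already run here */
	}
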
*/ #define container_of_const(ptr, type, member) \ _Generic(ptr, \ diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 76e41805b92d..96e8a66da133 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -43,7 +43,7 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); int dump_user_range(struct coredump_params *cprm, unsigned long start, unsigned long len); -extern void do_coredump(const kernel_siginfo_t *siginfo); +extern void vfs_coredump(const kernel_siginfo_t *siginfo); /* * Logging for the coredump code, ratelimited. @@ -63,7 +63,7 @@ extern void do_coredump(const kernel_siginfo_t *siginfo); #define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__) #else -static inline void do_coredump(const kernel_siginfo_t *siginfo) {} +static inline void vfs_coredump(const kernel_siginfo_t *siginfo) {} #define coredump_report(...) #define coredump_report_failure(...) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6378370a952f..b91b993f58ee 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -187,20 +187,31 @@ static inline void arch_cpu_finalize_init(void) { } void play_idle_precise(u64 duration_ns, u64 latency_ns); -static inline void play_idle(unsigned long duration_us) -{ - play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX); -} - #ifdef CONFIG_HOTPLUG_CPU void cpuhp_report_idle_dead(void); #else static inline void cpuhp_report_idle_dead(void) { } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ +enum cpu_attack_vectors { + CPU_MITIGATE_USER_KERNEL, + CPU_MITIGATE_USER_USER, + CPU_MITIGATE_GUEST_HOST, + CPU_MITIGATE_GUEST_GUEST, + NR_CPU_ATTACK_VECTORS, +}; + +enum smt_mitigations { + SMT_MITIGATIONS_OFF, + SMT_MITIGATIONS_AUTO, + SMT_MITIGATIONS_ON, +}; + #ifdef CONFIG_CPU_MITIGATIONS extern bool cpu_mitigations_off(void); extern bool cpu_mitigations_auto_nosmt(void); +extern bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v); +extern enum smt_mitigations smt_mitigations; #else static inline bool cpu_mitigations_off(void) { @@ -210,6 +221,11 @@ static inline bool cpu_mitigations_auto_nosmt(void) { return false; } +static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v) +{ + return false; +} +#define smt_mitigations SMT_MITIGATIONS_OFF #endif #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index df366ee15456..d381420bbd5f 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -145,7 +145,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_EIOINTC_STARTING, CPUHP_AP_IRQ_AVECINTC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, - CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, + CPUHP_AP_IRQ_ACLINT_SSWI_STARTING, CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index a559fdff3f7e..ecc8bc2dd7f4 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -4,15 +4,7 @@ #include <linux/types.h> -u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len); -u16 crc_t10dif_generic(u16 crc, const u8 *p, size_t len); - -static inline u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len) -{ - if (IS_ENABLED(CONFIG_CRC_T10DIF_ARCH)) - return crc_t10dif_arch(crc, p, len); - return crc_t10dif_generic(crc, p, len); -} +u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len); static inline u16 crc_t10dif(const u8 *p, size_t len) { diff 
--git a/include/linux/crc32.h b/include/linux/crc32.h index 8c1883b81b42..da78b215ff2e 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -5,33 +5,81 @@ #include <linux/types.h> #include <linux/bitrev.h> -u32 crc32_le_arch(u32 crc, const u8 *p, size_t len); -u32 crc32_le_base(u32 crc, const u8 *p, size_t len); -u32 crc32_be_arch(u32 crc, const u8 *p, size_t len); -u32 crc32_be_base(u32 crc, const u8 *p, size_t len); -u32 crc32c_arch(u32 crc, const u8 *p, size_t len); -u32 crc32c_base(u32 crc, const u8 *p, size_t len); +/** + * crc32_le() - Compute least-significant-bit-first IEEE CRC-32 + * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or + * the previous CRC value if computing incrementally. + * @p: Pointer to the data buffer + * @len: Length of data in bytes + * + * This implements the CRC variant that is often known as the IEEE CRC-32, or + * simply CRC-32, and is widely used in Ethernet and other applications: + * + * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + + * x^7 + x^5 + x^4 + x^2 + x^1 + x^0 + * - Bit order: Least-significant-bit-first + * - Polynomial in integer form: 0xedb88320 + * + * This does *not* invert the CRC at the beginning or end. The caller is + * expected to do that if it needs to. Inverting at both ends is recommended. + * + * For new applications, prefer to use CRC-32C instead. See crc32c(). + * + * Context: Any context + * Return: The new CRC value + */ +u32 crc32_le(u32 crc, const void *p, size_t len); -static inline u32 crc32_le(u32 crc, const void *p, size_t len) +/* This is just an alias for crc32_le(). */ +static inline u32 crc32(u32 crc, const void *p, size_t len) { - if (IS_ENABLED(CONFIG_CRC32_ARCH)) - return crc32_le_arch(crc, p, len); - return crc32_le_base(crc, p, len); + return crc32_le(crc, p, len); } -static inline u32 crc32_be(u32 crc, const void *p, size_t len) -{ - if (IS_ENABLED(CONFIG_CRC32_ARCH)) - return crc32_be_arch(crc, p, len); - return crc32_be_base(crc, p, len); -} +/** + * crc32_be() - Compute most-significant-bit-first IEEE CRC-32 + * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or + * the previous CRC value if computing incrementally. + * @p: Pointer to the data buffer + * @len: Length of data in bytes + * + * crc32_be() is the same as crc32_le() except that crc32_be() computes the + * *most-significant-bit-first* variant of the CRC. I.e., within each byte, the + * most significant bit is processed first (treated as highest order polynomial + * coefficient). The same bit order is also used for the CRC value itself: + * + * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + + * x^7 + x^5 + x^4 + x^2 + x^1 + x^0 + * - Bit order: Most-significant-bit-first + * - Polynomial in integer form: 0x04c11db7 + * + * Context: Any context + * Return: The new CRC value + */ +u32 crc32_be(u32 crc, const void *p, size_t len); -static inline u32 crc32c(u32 crc, const void *p, size_t len) -{ - if (IS_ENABLED(CONFIG_CRC32_ARCH)) - return crc32c_arch(crc, p, len); - return crc32c_base(crc, p, len); -} +/** + * crc32c() - Compute CRC-32C + * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or + * the previous CRC value if computing incrementally. + * @p: Pointer to the data buffer + * @len: Length of data in bytes + * + * This implements CRC-32C, i.e. the Castagnoli CRC. This is the recommended + * CRC variant to use in new applications that want a 32-bit CRC. 
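Editorial aside: as the crc32_le()/crc32_be() comments above spell out, these helpers do not invert the CRC at either end; the caller applies the recommended invert-at-both-ends convention. A minimal sketch, not part of the patch, with illustrative function names, including the incremental pattern where the running (non-inverted) value is carried between chunks.

	/* Editorial sketch, not from the patch. */
	static u32 ether_fcs(const void *buf, size_t len)
	{
		return ~crc32_le(~0, buf, len);	/* invert at both ends */
	}

	/* Incremental use: seed once, feed chunks, invert only at the end. */
	static u32 crc32c_of_two(const void *a, size_t la,
				 const void *b, size_t lb)
	{
		u32 crc = crc32c(~0, a, la);

		return ~crc32c(crc, b, lb);
	}
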
+ * + * - Polynomial: x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 + + * x^18 + x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0 + * - Bit order: Least-significant-bit-first + * - Polynomial in integer form: 0x82f63b78 + * + * This does *not* invert the CRC at the beginning or end. The caller is + * expected to do that if it needs to. Inverting at both ends is recommended. + * + * Context: Any context + * Return: The new CRC value + */ +u32 crc32c(u32 crc, const void *p, size_t len); /* * crc32_optimizations() returns flags that indicate which CRC32 library @@ -48,33 +96,6 @@ u32 crc32_optimizations(void); static inline u32 crc32_optimizations(void) { return 0; } #endif -/** - * crc32_le_combine - Combine two crc32 check values into one. For two - * sequences of bytes, seq1 and seq2 with lengths len1 - * and len2, crc32_le() check values were calculated - * for each, crc1 and crc2. - * - * @crc1: crc32 of the first block - * @crc2: crc32 of the second block - * @len2: length of the second block - * - * Return: The crc32_le() check value of seq1 and seq2 concatenated, - * requiring only crc1, crc2, and len2. Note: If seq_full denotes - * the concatenated memory area of seq1 with seq2, and crc_full - * the crc32_le() value of seq_full, then crc_full == - * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded - * with the same initializer as crc1, and crc2 seed was 0. See - * also crc32_combine_test(). - */ -u32 crc32_le_shift(u32 crc, size_t len); - -static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) -{ - return crc32_le_shift(crc1, len2) ^ crc2; -} - -#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) - /* * Helpers for hash table generation of ethernet nics: * diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h index 62c4b7790a28..ccab711295fa 100644 --- a/include/linux/crc32poly.h +++ b/include/linux/crc32poly.h @@ -2,19 +2,13 @@ #ifndef _LINUX_CRC32_POLY_H #define _LINUX_CRC32_POLY_H -/* - * There are multiple 16-bit CRC polynomials in common use, but this is - * *the* standard CRC-32 polynomial, first popularized by Ethernet. - * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 - */ +/* The polynomial used by crc32_le(), in integer form. See crc32_le(). */ #define CRC32_POLY_LE 0xedb88320 + +/* The polynomial used by crc32_be(), in integer form. See crc32_be(). */ #define CRC32_POLY_BE 0x04c11db7 -/* - * This is the CRC32c polynomial, as outlined by Castagnoli. - * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+ - * x^8+x^6+x^0 - */ -#define CRC32C_POLY_LE 0x82F63B78 +/* The polynomial used by crc32c(), in integer form. See crc32c(). */ +#define CRC32C_POLY_LE 0x82f63b78 #endif /* _LINUX_CRC32_POLY_H */ diff --git a/include/linux/crc64.h b/include/linux/crc64.h index 41de30b907df..fc0c06ab1993 100644 --- a/include/linux/crc64.h +++ b/include/linux/crc64.h @@ -1,17 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * See lib/crc64.c for the related specification and polynomial arithmetic. - */ #ifndef _LINUX_CRC64_H #define _LINUX_CRC64_H #include <linux/types.h> -u64 crc64_be_arch(u64 crc, const u8 *p, size_t len); -u64 crc64_be_generic(u64 crc, const u8 *p, size_t len); -u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len); -u64 crc64_nvme_generic(u64 crc, const u8 *p, size_t len); - /** * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64 * @crc: seed value for computation. 
0 or (u64)~0 for a new CRC calculation, @@ -19,12 +11,7 @@ u64 crc64_nvme_generic(u64 crc, const u8 *p, size_t len); * @p: pointer to buffer over which CRC64 is run * @len: length of buffer @p */ -static inline u64 crc64_be(u64 crc, const void *p, size_t len) -{ - if (IS_ENABLED(CONFIG_CRC64_ARCH)) - return crc64_be_arch(crc, p, len); - return crc64_be_generic(crc, p, len); -} +u64 crc64_be(u64 crc, const void *p, size_t len); /** * crc64_nvme - Calculate CRC64-NVME @@ -36,11 +23,6 @@ static inline u64 crc64_be(u64 crc, const void *p, size_t len) * This computes the CRC64 defined in the NVME NVM Command Set Specification, * *including the bitwise inversion at the beginning and end*. */ -static inline u64 crc64_nvme(u64 crc, const void *p, size_t len) -{ - if (IS_ENABLED(CONFIG_CRC64_ARCH)) - return ~crc64_nvme_arch(~crc, p, len); - return ~crc64_nvme_generic(~crc, p, len); -} +u64 crc64_nvme(u64 crc, const void *p, size_t len); #endif /* _LINUX_CRC64_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index 5658a3bfe803..a102a10f833f 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -263,6 +263,8 @@ static inline void put_cred(const struct cred *cred) put_cred_many(cred, 1); } +DEFINE_FREE(put_cred, struct cred *, if (!IS_ERR_OR_NULL(_T)) put_cred(_T)) + /** * current_cred - Access the current task's subjective credentials * diff --git a/include/linux/dax.h b/include/linux/dax.h index dcc9fcdf14e4..78891518291d 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -65,12 +65,13 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, /* * Check if given mapping is supported by the file / underlying device. */ -static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, - struct dax_device *dax_dev) +static inline bool daxdev_mapping_supported(vm_flags_t vm_flags, + const struct inode *inode, + struct dax_device *dax_dev) { - if (!(vma->vm_flags & VM_SYNC)) + if (!(vm_flags & VM_SYNC)) return true; - if (!IS_DAX(file_inode(vma->vm_file))) + if (!IS_DAX(inode)) return false; return dax_synchronous(dax_dev); } @@ -110,10 +111,11 @@ static inline void set_dax_nomc(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } -static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, - struct dax_device *dax_dev) +static inline bool daxdev_mapping_supported(vm_flags_t vm_flags, + const struct inode *inode, + struct dax_device *dax_dev) { - return !(vma->vm_flags & VM_SYNC); + return !(vm_flags & VM_SYNC); } static inline size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index e29823c701ac..cc3e1c1a3454 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -237,7 +237,6 @@ extern void d_instantiate_new(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); extern void d_delete(struct dentry *); -extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); @@ -245,6 +244,9 @@ extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, wait_queue_head_t *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +/* weird procfs mess; *NOT* exported */ +extern struct dentry * 
d_splice_alias_ops(struct inode *, struct dentry *, + const struct dentry_operations *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent, const struct qstr *name); @@ -604,4 +606,6 @@ static inline struct dentry *d_next_sibling(const struct dentry *dentry) return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib); } +void set_default_d_op(struct super_block *, const struct dentry_operations *); + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index fa2568b4380d..7cecda29447e 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -162,8 +162,7 @@ void debugfs_remove(struct dentry *dentry); void debugfs_lookup_and_remove(const char *name, struct dentry *parent); -const struct file_operations *debugfs_real_fops(const struct file *filp); -const void *debugfs_get_aux(const struct file *file); +void *debugfs_get_aux(const struct file *file); int debugfs_file_get(struct dentry *dentry); void debugfs_file_put(struct dentry *dentry); @@ -329,7 +328,6 @@ static inline void debugfs_lookup_and_remove(const char *name, struct dentry *parent) { } -const struct file_operations *debugfs_real_fops(const struct file *filp); void *debugfs_get_aux(const struct file *file); static inline int debugfs_file_get(struct dentry *dentry) diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index d312ffbac4dd..dc1075dc3446 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -103,6 +103,8 @@ struct devfreq_dev_status { * * @is_cooling_device: A self-explanatory boolean giving the device a * cooling effect property. + * @dev_groups: Optional device-specific sysfs attribute groups to + * be attached to the devfreq device. */ struct devfreq_dev_profile { unsigned long initial_freq; @@ -119,6 +121,8 @@ struct devfreq_dev_profile { unsigned int max_state; bool is_cooling_device; + + const struct attribute_group **dev_groups; }; /** diff --git a/include/linux/device.h b/include/linux/device.h index 4940db137fff..0470d19da7f2 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -879,6 +879,33 @@ static inline bool dev_pm_smart_suspend(struct device *dev) #endif } +/* + * dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag + * @dev: Target device. + * @val: New flag value. + * + * When set, power.strict_midlayer means that the middle layer power management + * code (typically, a bus type or a PM domain) does not expect its runtime PM + * suspend callback to be invoked at all during system-wide PM transitions and + * it does not expect its runtime PM resume callback to be invoked at any point + * when runtime PM is disabled for the device during system-wide PM transitions.
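+ *
+ * For instance (a sketch only, with dev assumed to be a device that the
+ * middle layer manages itself during system-wide transitions), the flag
+ * could be set when the device is bound:
+ *
+ *	dev_pm_set_strict_midlayer(dev, true);
+ *
+ * and cleared again with dev_pm_set_strict_midlayer(dev, false) when the
+ * device is unbound.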
+ */ +static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val) +{ +#ifdef CONFIG_PM_SLEEP + dev->power.strict_midlayer = val; +#endif +} + +static inline bool dev_pm_strict_midlayer_is_set(struct device *dev) +{ +#ifdef CONFIG_PM_SLEEP + return dev->power.strict_midlayer; +#else + return false; +#endif +} + static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); @@ -917,6 +944,18 @@ static inline bool dev_has_sync_state(struct device *dev) return false; } +static inline int dev_set_drv_sync_state(struct device *dev, + void (*fn)(struct device *dev)) +{ + if (!dev || !dev->driver) + return 0; + if (dev->driver->sync_state && dev->driver->sync_state != fn) + return -EBUSY; + if (!dev->driver->sync_state) + dev->driver->sync_state = fn; + return 0; +} + static inline void dev_set_removable(struct device *dev, enum device_removable removable) { @@ -1048,6 +1087,7 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode); int device_add_of_node(struct device *dev, struct device_node *of_node); void device_remove_of_node(struct device *dev); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); +struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode); static inline struct device_node *dev_of_node(struct device *dev) { @@ -1162,6 +1202,11 @@ void device_links_supplier_sync_state_pause(void); void device_links_supplier_sync_state_resume(void); void device_link_wait_removal(void); +static inline bool device_link_test(const struct device_link *link, u32 flags) +{ + return !!(link->flags & flags); +} + /* Create alias, so I can be autoloaded. */ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index b12776883d14..64639e104110 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -26,6 +26,7 @@ struct dma_fence; struct dma_fence_ops; struct dma_fence_cb; +struct seq_file; /** * struct dma_fence - software synchronization primitive @@ -97,6 +98,7 @@ struct dma_fence { }; enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SEQNO64_BIT, DMA_FENCE_FLAG_SIGNALED_BIT, DMA_FENCE_FLAG_TIMESTAMP_BIT, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, @@ -125,14 +127,6 @@ struct dma_fence_cb { */ struct dma_fence_ops { /** - * @use_64bit_seqno: - * - * True if this dma_fence implementation uses 64bit seqno, false - * otherwise. - */ - bool use_64bit_seqno; - - /** * @get_driver_name: * * Returns the driver name. This is a callback to allow drivers to @@ -262,6 +256,9 @@ struct dma_fence_ops { void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, u64 seqno); +void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, u64 seqno); + void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence); void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq); @@ -382,6 +379,29 @@ bool dma_fence_remove_callback(struct dma_fence *fence, void dma_fence_enable_sw_signaling(struct dma_fence *fence); /** + * DOC: Safe external access to driver provided object members + * + * All data not stored directly in the dma-fence object, such as the + * &dma_fence.lock and memory potentially accessed by functions in the + * &dma_fence.ops table, MUST NOT be accessed after the fence has been signalled + * because after that point drivers are allowed to free it. 
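+ *
+ * For example (sketch only; fence is assumed to be a valid fence that may
+ * already have signalled), a caller can read the driver provided name like
+ * this:
+ *
+ *	rcu_read_lock();
+ *	pr_info("driver: %s\n", rcu_dereference(dma_fence_driver_name(fence)));
+ *	rcu_read_unlock();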
+ * + * All code accessing that data via the dma-fence API (or directly, which is + * discouraged), MUST make sure to contain the complete access within a + * &rcu_read_lock and &rcu_read_unlock pair. + * + * Some dma-fence APIs handle this automatically, while others, for example + * &dma_fence_driver_name and &dma_fence_timeline_name, leave that + * responsibility to the caller. + * + * To enable this scheme to work, drivers MUST ensure an RCU grace period elapses + * between signalling the fence and freeing said data. + * + */ +const char __rcu *dma_fence_driver_name(struct dma_fence *fence); +const char __rcu *dma_fence_timeline_name(struct dma_fence *fence); + +/** * dma_fence_is_signaled_locked - Return an indication if the fence * is signaled yet. * @fence: the fence to check @@ -441,21 +461,20 @@ dma_fence_is_signaled(struct dma_fence *fence) /** * __dma_fence_is_later - return if f1 is chronologically later than f2 + * @fence: fence in whose context to do the comparison * @f1: the first fence's seqno * @f2: the second fence's seqno from the same context - * @ops: dma_fence_ops associated with the seqno * * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not common across contexts. */ -static inline bool __dma_fence_is_later(u64 f1, u64 f2, - const struct dma_fence_ops *ops) +static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2) { /* This is for backward compatibility with drivers which can only handle * 32bit sequence numbers. Use a 64bit compare when the driver says to * do so. */ - if (ops->use_64bit_seqno) + if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags)) return f1 > f2; return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; @@ -475,7 +494,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1, if (WARN_ON(f1->context != f2->context)) return false; - return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops); + return __dma_fence_is_later(f1, f1->seqno, f2->seqno); } /** diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index bb146c5ac3e4..6de7c05d6bd8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1524,6 +1524,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, struct dma_chan *dma_request_chan(struct device *dev, const char *name); struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); +struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name); void dma_release_channel(struct dma_chan *chan); int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); @@ -1560,6 +1561,12 @@ static inline struct dma_chan *dma_request_chan_by_mask( { return ERR_PTR(-ENODEV); } + +static inline struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name) +{ + return ERR_PTR(-ENODEV); +} + static inline void dma_release_channel(struct dma_chan *chan) { } diff --git a/include/linux/dpll.h b/include/linux/dpll.h index 5e4f9ab1cf75..fa1e76920d0e 100644 --- a/include/linux/dpll.h +++ b/include/linux/dpll.h @@ -30,6 +30,14 @@ struct dpll_device_ops { void *dpll_priv, unsigned long *qls, struct netlink_ext_ack *extack); + int (*phase_offset_monitor_set)(const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_feature_state state, + struct netlink_ext_ack *extack); + int (*phase_offset_monitor_get)(const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_feature_state *state, + struct netlink_ext_ack *extack); }; struct dpll_pin_ops { @@ -95,6
+103,16 @@ struct dpll_pin_ops { const struct dpll_device *dpll, void *dpll_priv, struct dpll_pin_esync *esync, struct netlink_ext_ack *extack); + int (*ref_sync_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *ref_sync_pin, + void *ref_sync_pin_priv, + const enum dpll_pin_state state, + struct netlink_ext_ack *extack); + int (*ref_sync_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *ref_sync_pin, + void *ref_sync_pin_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack); }; struct dpll_pin_frequency { @@ -194,6 +212,9 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin, void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin, const struct dpll_pin_ops *ops, void *priv); +int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin, + struct dpll_pin *ref_sync_pin); + int dpll_device_change_ntf(struct dpll_device *dpll); int dpll_pin_change_ntf(struct dpll_pin *pin); diff --git a/include/linux/efi.h b/include/linux/efi.h index 7d63d1d75f22..e3776d9cad07 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1334,7 +1334,7 @@ struct linux_efi_initrd { bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table); -static inline +static __always_inline bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table) { if (!IS_ENABLED(CONFIG_XEN_EFI)) diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index f94f3fdf15fc..7177436f0f9e 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -2,28 +2,16 @@ #ifndef __LINUX_ENTRYCOMMON_H #define __LINUX_ENTRYCOMMON_H -#include <linux/static_call_types.h> +#include <linux/irq-entry-common.h> #include <linux/ptrace.h> -#include <linux/syscalls.h> #include <linux/seccomp.h> #include <linux/sched.h> -#include <linux/context_tracking.h> #include <linux/livepatch.h> #include <linux/resume_user_mode.h> -#include <linux/tick.h> -#include <linux/kmsan.h> #include <asm/entry-common.h> #include <asm/syscall.h> -/* - * Define dummy _TIF work flags if not defined by the architecture or for - * disabled functionality. - */ -#ifndef _TIF_PATCH_PENDING -# define _TIF_PATCH_PENDING (0) -#endif - #ifndef _TIF_UPROBE # define _TIF_UPROBE (0) #endif @@ -56,69 +44,6 @@ SYSCALL_WORK_SYSCALL_EXIT_TRAP | \ ARCH_SYSCALL_WORK_EXIT) -/* - * TIF flags handled in exit_to_user_mode_loop() - */ -#ifndef ARCH_EXIT_TO_USER_MODE_WORK -# define ARCH_EXIT_TO_USER_MODE_WORK (0) -#endif - -#define EXIT_TO_USER_MODE_WORK \ - (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ - _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ - ARCH_EXIT_TO_USER_MODE_WORK) - -/** - * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs - * @regs: Pointer to currents pt_regs - * - * Defaults to an empty implementation. Can be replaced by architecture - * specific code. - * - * Invoked from syscall_enter_from_user_mode() in the non-instrumentable - * section. Use __always_inline so the compiler cannot push it out of line - * and make it instrumentable. - */ -static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs); - -#ifndef arch_enter_from_user_mode -static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {} -#endif - -/** - * enter_from_user_mode - Establish state when coming from user mode - * - * Syscall/interrupt entry disables interrupts, but user mode is traced as - * interrupts enabled. 
Also with NO_HZ_FULL RCU might be idle. - * - * 1) Tell lockdep that interrupts are disabled - * 2) Invoke context tracking if enabled to reactivate RCU - * 3) Trace interrupts off state - * - * Invoked from architecture specific syscall entry code with interrupts - * disabled. The calling code has to be non-instrumentable. When the - * function returns all state is correct and interrupts are still - * disabled. The subsequent functions can be instrumented. - * - * This is invoked when there is architecture specific functionality to be - * done between establishing state and enabling interrupts. The caller must - * enable interrupts before invoking syscall_enter_from_user_mode_work(). - */ -static __always_inline void enter_from_user_mode(struct pt_regs *regs) -{ - arch_enter_from_user_mode(regs); - lockdep_hardirqs_off(CALLER_ADDR0); - - CT_WARN_ON(__ct_state() != CT_STATE_USER); - user_exit_irqoff(); - - instrumentation_begin(); - kmsan_unpoison_entry_regs(regs); - trace_hardirqs_off_finish(); - instrumentation_end(); -} - /** * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts * @regs: Pointer to currents pt_regs @@ -204,170 +129,6 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l } /** - * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable() - * @ti_work: Cached TIF flags gathered with interrupts disabled - * - * Defaults to local_irq_enable(). Can be supplied by architecture specific - * code. - */ -static inline void local_irq_enable_exit_to_user(unsigned long ti_work); - -#ifndef local_irq_enable_exit_to_user -static inline void local_irq_enable_exit_to_user(unsigned long ti_work) -{ - local_irq_enable(); -} -#endif - -/** - * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable() - * - * Defaults to local_irq_disable(). Can be supplied by architecture specific - * code. - */ -static inline void local_irq_disable_exit_to_user(void); - -#ifndef local_irq_disable_exit_to_user -static inline void local_irq_disable_exit_to_user(void) -{ - local_irq_disable(); -} -#endif - -/** - * arch_exit_to_user_mode_work - Architecture specific TIF work for exit - * to user mode. - * @regs: Pointer to currents pt_regs - * @ti_work: Cached TIF flags gathered with interrupts disabled - * - * Invoked from exit_to_user_mode_loop() with interrupt enabled - * - * Defaults to NOOP. Can be supplied by architecture specific code. - */ -static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, - unsigned long ti_work); - -#ifndef arch_exit_to_user_mode_work -static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, - unsigned long ti_work) -{ -} -#endif - -/** - * arch_exit_to_user_mode_prepare - Architecture specific preparation for - * exit to user mode. - * @regs: Pointer to currents pt_regs - * @ti_work: Cached TIF flags gathered with interrupts disabled - * - * Invoked from exit_to_user_mode_prepare() with interrupt disabled as the last - * function before return. Defaults to NOOP. - */ -static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - unsigned long ti_work); - -#ifndef arch_exit_to_user_mode_prepare -static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - unsigned long ti_work) -{ -} -#endif - -/** - * arch_exit_to_user_mode - Architecture specific final work before - * exit to user mode. - * - * Invoked from exit_to_user_mode() with interrupt disabled as the last - * function before return. Defaults to NOOP. 
- * - * This needs to be __always_inline because it is non-instrumentable code - * invoked after context tracking switched to user mode. - * - * An architecture implementation must not do anything complex, no locking - * etc. The main purpose is for speculation mitigations. - */ -static __always_inline void arch_exit_to_user_mode(void); - -#ifndef arch_exit_to_user_mode -static __always_inline void arch_exit_to_user_mode(void) { } -#endif - -/** - * arch_do_signal_or_restart - Architecture specific signal delivery function - * @regs: Pointer to currents pt_regs - * - * Invoked from exit_to_user_mode_loop(). - */ -void arch_do_signal_or_restart(struct pt_regs *regs); - -/** - * exit_to_user_mode_loop - do any pending work before leaving to user space - */ -unsigned long exit_to_user_mode_loop(struct pt_regs *regs, - unsigned long ti_work); - -/** - * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required - * @regs: Pointer to pt_regs on entry stack - * - * 1) check that interrupts are disabled - * 2) call tick_nohz_user_enter_prepare() - * 3) call exit_to_user_mode_loop() if any flags from - * EXIT_TO_USER_MODE_WORK are set - * 4) check that interrupts are still disabled - */ -static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) -{ - unsigned long ti_work; - - lockdep_assert_irqs_disabled(); - - /* Flush pending rcuog wakeup before the last need_resched() check */ - tick_nohz_user_enter_prepare(); - - ti_work = read_thread_flags(); - if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) - ti_work = exit_to_user_mode_loop(regs, ti_work); - - arch_exit_to_user_mode_prepare(regs, ti_work); - - /* Ensure that kernel state is sane for a return to userspace */ - kmap_assert_nomap(); - lockdep_assert_irqs_disabled(); - lockdep_sys_exit(); -} - -/** - * exit_to_user_mode - Fixup state when exiting to user mode - * - * Syscall/interrupt exit enables interrupts, but the kernel state is - * interrupts disabled when this is invoked. Also tell RCU about it. - * - * 1) Trace interrupts on state - * 2) Invoke context tracking if enabled to adjust RCU state - * 3) Invoke architecture specific last minute exit code, e.g. speculation - * mitigations, etc.: arch_exit_to_user_mode() - * 4) Tell lockdep that interrupts are enabled - * - * Invoked from architecture specific code when syscall_exit_to_user_mode() - * is not suitable as the last step before returning to userspace. Must be - * invoked with interrupts disabled and the caller must be - * non-instrumentable. - * The caller has to invoke syscall_exit_to_user_mode_work() before this. - */ -static __always_inline void exit_to_user_mode(void) -{ - instrumentation_begin(); - trace_hardirqs_on_prepare(); - lockdep_hardirqs_on_prepare(); - instrumentation_end(); - - user_enter_irqoff(); - arch_exit_to_user_mode(); - lockdep_hardirqs_on(CALLER_ADDR0); -} - -/** * syscall_exit_work - Handle work before returning to user mode * @regs: Pointer to current pt_regs * @work: Current thread syscall work @@ -451,145 +212,4 @@ static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs) exit_to_user_mode(); } -/** - * irqentry_enter_from_user_mode - Establish state before invoking the irq handler - * @regs: Pointer to currents pt_regs - * - * Invoked from architecture specific entry code with interrupts disabled. - * Can only be called when the interrupt entry came from user mode. The - * calling code must be non-instrumentable. 
When the function returns all - * state is correct and the subsequent functions can be instrumented. - * - * The function establishes state (lockdep, RCU (context tracking), tracing) - */ -void irqentry_enter_from_user_mode(struct pt_regs *regs); - -/** - * irqentry_exit_to_user_mode - Interrupt exit work - * @regs: Pointer to current's pt_regs - * - * Invoked with interrupts disabled and fully valid regs. Returns with all - * work handled, interrupts disabled such that the caller can immediately - * switch to user mode. Called from architecture specific interrupt - * handling code. - * - * The call order is #2 and #3 as described in syscall_exit_to_user_mode(). - * Interrupt exit is not invoking #1 which is the syscall specific one time - * work. - */ -void irqentry_exit_to_user_mode(struct pt_regs *regs); - -#ifndef irqentry_state -/** - * struct irqentry_state - Opaque object for exception state storage - * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the - * exit path has to invoke ct_irq_exit(). - * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that - * lockdep state is restored correctly on exit from nmi. - * - * This opaque object is filled in by the irqentry_*_enter() functions and - * must be passed back into the corresponding irqentry_*_exit() functions - * when the exception is complete. - * - * Callers of irqentry_*_[enter|exit]() must consider this structure opaque - * and all members private. Descriptions of the members are provided to aid in - * the maintenance of the irqentry_*() functions. - */ -typedef struct irqentry_state { - union { - bool exit_rcu; - bool lockdep; - }; -} irqentry_state_t; -#endif - -/** - * irqentry_enter - Handle state tracking on ordinary interrupt entries - * @regs: Pointer to pt_regs of interrupted context - * - * Invokes: - * - lockdep irqflag state tracking as low level ASM entry disabled - * interrupts. - * - * - Context tracking if the exception hit user mode. - * - * - The hardirq tracer to keep the state consistent as low level ASM - * entry disabled interrupts. - * - * As a precondition, this requires that the entry came from user mode, - * idle, or a kernel context in which RCU is watching. - * - * For kernel mode entries RCU handling is done conditional. If RCU is - * watching then the only RCU requirement is to check whether the tick has - * to be restarted. If RCU is not watching then ct_irq_enter() has to be - * invoked on entry and ct_irq_exit() on exit. - * - * Avoiding the ct_irq_enter/exit() calls is an optimization but also - * solves the problem of kernel mode pagefaults which can schedule, which - * is not possible after invoking ct_irq_enter() without undoing it. - * - * For user mode entries irqentry_enter_from_user_mode() is invoked to - * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit - * would not be possible. - * - * Returns: An opaque object that must be passed to idtentry_exit() - */ -irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); - -/** - * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt - * - * Conditional reschedule with additional sanity checks. 
- */ -void raw_irqentry_exit_cond_resched(void); -#ifdef CONFIG_PREEMPT_DYNAMIC -#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) -#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched -#define irqentry_exit_cond_resched_dynamic_disabled NULL -DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); -#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)() -#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) -DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); -void dynamic_irqentry_exit_cond_resched(void); -#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched() -#endif -#else /* CONFIG_PREEMPT_DYNAMIC */ -#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched() -#endif /* CONFIG_PREEMPT_DYNAMIC */ - -/** - * irqentry_exit - Handle return from exception that used irqentry_enter() - * @regs: Pointer to pt_regs (exception entry regs) - * @state: Return value from matching call to irqentry_enter() - * - * Depending on the return target (kernel/user) this runs the necessary - * preemption and work checks if possible and required and returns to - * the caller with interrupts disabled and no further work pending. - * - * This is the last action before returning to the low level ASM code which - * just needs to return to the appropriate context. - * - * Counterpart to irqentry_enter(). - */ -void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state); - -/** - * irqentry_nmi_enter - Handle NMI entry - * @regs: Pointer to currents pt_regs - * - * Similar to irqentry_enter() but taking care of the NMI constraints. - */ -irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs); - -/** - * irqentry_nmi_exit - Handle return from NMI handling - * @regs: Pointer to pt_regs (NMI entry regs) - * @irq_state: Return value from matching call to irqentry_nmi_enter() - * - * Last action before returning to the low level assembly code. - * - * Counterpart to irqentry_nmi_enter(). - */ -void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state); - #endif diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 5e0dd333ad1f..de5bd76a400c 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -536,7 +536,7 @@ struct ethtool_rmon_hist_range { u16 high; }; -#define ETHTOOL_RMON_HIST_MAX 10 +#define ETHTOOL_RMON_HIST_MAX 11 /** * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics @@ -826,6 +826,19 @@ struct ethtool_rxfh_param { }; /** + * struct ethtool_rxfh_fields - Rx Flow Hashing (RXFH) header field config + * @data: which header fields are used for hashing, bitmask of RXH_* defines + * @flow_type: L2-L4 network traffic flow type + * @rss_context: RSS context, will only be used if rxfh_per_ctx_fields is + * set in struct ethtool_ops + */ +struct ethtool_rxfh_fields { + u32 data; + u32 flow_type; + u32 rss_context; +}; + +/** * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info * @cmd: command number = %ETHTOOL_GET_TS_INFO * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags @@ -852,9 +865,8 @@ struct kernel_ethtool_ts_info { * @supported_input_xfrm: supported types of input xfrm from %RXH_XFRM_*. * @cap_link_lanes_supported: indicates if the driver supports lanes * parameter. - * @cap_rss_ctx_supported: indicates if the driver supports RSS - * contexts via legacy API, drivers implementing @create_rxfh_context - * do not have to set this bit. 
+ * @rxfh_per_ctx_fields: device supports selecting different header fields + * for Rx hash calculation and RSS for each additional context. * @rxfh_per_ctx_key: device supports setting different RSS key for each * additional context. Netlink API should report hfunc, key, and input_xfrm * for every context, not just context 0. @@ -968,6 +980,8 @@ struct kernel_ethtool_ts_info { * will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. + * @get_rxfh_fields: Get header fields used for flow hashing. + * @set_rxfh_fields: Set header fields used for flow hashing. * @create_rxfh_context: Create a new RSS context with the specified RX flow * hash indirection table, hash key, and hash function. * The &struct ethtool_rxfh_context for this context is passed in @ctx; @@ -1083,7 +1097,7 @@ struct kernel_ethtool_ts_info { struct ethtool_ops { u32 supported_input_xfrm:8; u32 cap_link_lanes_supported:1; - u32 cap_rss_ctx_supported:1; + u32 rxfh_per_ctx_fields:1; u32 rxfh_per_ctx_key:1; u32 cap_rss_rxnfc_adds:1; u32 rxfh_indir_space; @@ -1153,6 +1167,11 @@ struct ethtool_ops { int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *extack); + int (*get_rxfh_fields)(struct net_device *, + struct ethtool_rxfh_fields *); + int (*set_rxfh_fields)(struct net_device *, + const struct ethtool_rxfh_fields *, + struct netlink_ext_ack *extack); int (*create_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *ctx, const struct ethtool_rxfh_param *rxfh, diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h index aba91335273a..39254b2726c0 100644 --- a/include/linux/ethtool_netlink.h +++ b/include/linux/ethtool_netlink.h @@ -43,6 +43,8 @@ void ethtool_aggregate_rmon_stats(struct net_device *dev, struct ethtool_rmon_stats *rmon_stats); bool ethtool_dev_mm_supported(struct net_device *dev); +void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notif); + #else static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd) { @@ -120,6 +122,11 @@ static inline bool ethtool_dev_mm_supported(struct net_device *dev) return false; } +static inline void ethnl_pse_send_ntf(struct net_device *netdev, + unsigned long notif) +{ +} + #endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */ static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 25c4a5afbd44..cfb0dd1ea49c 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -230,7 +230,7 @@ struct handle_to_path_ctx { * directory. The name should be stored in the @name (with the * understanding that it is already pointing to a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code - * or error. @get_name will be called without @parent->i_mutex held. + * or error. @get_name will be called without @parent->i_rwsem held. * * get_parent: * @get_parent should find the parent directory for the given @child which @@ -247,7 +247,7 @@ struct handle_to_path_ctx { * @commit_metadata should commit metadata changes to stable storage. 
* * Locking rules: - * get_parent is called with child->d_inode->i_mutex down + * get_parent is called with child->d_inode->i_rwsem down * get_name is not (which is possibly inconsistent) */ diff --git a/include/linux/falloc.h b/include/linux/falloc.h index 3f49f3df6af5..7c38c6b76b60 100644 --- a/include/linux/falloc.h +++ b/include/linux/falloc.h @@ -36,7 +36,8 @@ struct space_resv { FALLOC_FL_COLLAPSE_RANGE | \ FALLOC_FL_ZERO_RANGE | \ FALLOC_FL_INSERT_RANGE | \ - FALLOC_FL_UNSHARE_RANGE) + FALLOC_FL_UNSHARE_RANGE | \ + FALLOC_FL_WRITE_ZEROES) /* on ia32 l_start is on a 32-bit boundary */ #if defined(CONFIG_X86_64) diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h index 47c05a9851d0..f89dcfad3f8f 100644 --- a/include/linux/fileattr.h +++ b/include/linux/fileattr.h @@ -14,13 +14,33 @@ FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \ FS_XFLAG_PROJINHERIT) +/* Read-only inode flags */ +#define FS_XFLAG_RDONLY_MASK \ + (FS_XFLAG_PREALLOC | FS_XFLAG_HASATTR) + +/* Flags to indicate valid value of fsx_ fields */ +#define FS_XFLAG_VALUES_MASK \ + (FS_XFLAG_EXTSIZE | FS_XFLAG_COWEXTSIZE) + +/* Flags for directories */ +#define FS_XFLAG_DIRONLY_MASK \ + (FS_XFLAG_RTINHERIT | FS_XFLAG_NOSYMLINKS | FS_XFLAG_EXTSZINHERIT) + +/* Misc settable flags */ +#define FS_XFLAG_MISC_MASK \ + (FS_XFLAG_REALTIME | FS_XFLAG_NODEFRAG | FS_XFLAG_FILESTREAM) + +#define FS_XFLAGS_MASK \ + (FS_XFLAG_COMMON | FS_XFLAG_RDONLY_MASK | FS_XFLAG_VALUES_MASK | \ + FS_XFLAG_DIRONLY_MASK | FS_XFLAG_MISC_MASK) + /* * Merged interface for miscellaneous file attributes. 'flags' originates from * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which * is handled by the VFS helpers, so filesystems are free to implement just one * or both of these sub-interfaces. */ -struct fileattr { +struct file_kattr { u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */ /* struct fsxattr: */ u32 fsx_xflags; /* xflags field value (get/set) */ @@ -33,10 +53,10 @@ struct fileattr { bool fsx_valid:1; }; -int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa); +int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa); -void fileattr_fill_xflags(struct fileattr *fa, u32 xflags); -void fileattr_fill_flags(struct fileattr *fa, u32 flags); +void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags); +void fileattr_fill_flags(struct file_kattr *fa, u32 flags); /** * fileattr_has_fsx - check for extended flags/attributes @@ -45,15 +65,19 @@ void fileattr_fill_flags(struct fileattr *fa, u32 flags); * Return: true if any attributes are present that are not represented in * ->flags. 
*/ -static inline bool fileattr_has_fsx(const struct fileattr *fa) +static inline bool fileattr_has_fsx(const struct file_kattr *fa) { return fa->fsx_valid && ((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 || fa->fsx_projid != 0 || fa->fsx_cowextsize != 0); } -int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, - struct fileattr *fa); + struct file_kattr *fa); +int ioctl_getflags(struct file *file, unsigned int __user *argp); +int ioctl_setflags(struct file *file, unsigned int __user *argp); +int ioctl_fsgetxattr(struct file *file, void __user *argp); +int ioctl_fssetxattr(struct file *file, void __user *argp); #endif /* _LINUX_FILEATTR_H */ diff --git a/include/linux/filelock.h b/include/linux/filelock.h index c412ded9171e..c2ce8ba05d06 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -175,9 +175,14 @@ static inline bool lock_is_write(struct file_lock *fl) return fl->c.flc_type == F_WRLCK; } +static inline void locks_wake_up_waiter(struct file_lock_core *flc) +{ + wake_up(&flc->flc_wait); +} + static inline void locks_wake_up(struct file_lock *fl) { - wake_up(&fl->c.flc_wait); + locks_wake_up_waiter(&fl->c); } static inline bool locks_can_async_lock(const struct file_operations *fops) diff --git a/include/linux/filter.h b/include/linux/filter.h index f5cf4d35d83e..1e7fd3ee759e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -82,7 +82,7 @@ struct ctl_table_header; #define BPF_CALL_ARGS 0xe0 /* unused opcode to mark speculation barrier for mitigating - * Speculative Store Bypass + * Spectre v1 and v4 */ #define BPF_NOSPEC 0xc0 @@ -1073,10 +1073,20 @@ bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } -int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap, + enum skb_drop_reason *reason); + static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { - return sk_filter_trim_cap(sk, skb, 1); + enum skb_drop_reason ignore_reason; + + return sk_filter_trim_cap(sk, skb, 1, &ignore_reason); +} + +static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason *reason) +{ + return sk_filter_trim_cap(sk, skb, 1, reason); } struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); @@ -1278,6 +1288,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog, const struct bpf_insn *insn, bool extra_pass, u64 *func_addr, bool *func_addr_fixed); +const char *bpf_jit_get_prog_name(struct bpf_prog *prog); + struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h index 7cae703b3137..a66eb7624730 100644 --- a/include/linux/firmware/cirrus/cs_dsp.h +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -64,14 +64,12 @@ struct cs_dsp_region { /** * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space - * @list: List node for internal use * @alg: Algorithm id * @ver: Expected algorithm version * @type: Memory region type * @base: Address of region */ struct cs_dsp_alg_region { - struct list_head list; unsigned int alg; unsigned int ver; int type; diff --git 
a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h index a8a17eeb7d90..d4212bc42b2c 100644 --- a/include/linux/firmware/imx/sm.h +++ b/include/linux/firmware/imx/sm.h @@ -11,12 +11,20 @@ #include <linux/scmi_imx_protocol.h> #include <linux/types.h> -#define SCMI_IMX_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */ -#define SCMI_IMX_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */ -#define SCMI_IMX_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */ -#define SCMI_IMX_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */ -#define SCMI_IMX_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */ -#define SCMI_IMX_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */ +#define SCMI_IMX95_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */ +#define SCMI_IMX95_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */ +#define SCMI_IMX95_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */ +#define SCMI_IMX95_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */ +#define SCMI_IMX95_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */ +#define SCMI_IMX95_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */ + +#define SCMI_IMX94_CTRL_PDM_CLK_SEL 0U /*!< AON PDM clock sel */ +#define SCMI_IMX94_CTRL_MQS1_SETTINGS 1U /*!< AON MQS settings */ +#define SCMI_IMX94_CTRL_MQS2_SETTINGS 2U /*!< WAKE MQS settings */ +#define SCMI_IMX94_CTRL_SAI1_MCLK 3U /*!< AON SAI1 MCLK */ +#define SCMI_IMX94_CTRL_SAI2_MCLK 4U /*!< WAKE SAI2 MCLK */ +#define SCMI_IMX94_CTRL_SAI3_MCLK 5U /*!< WAKE SAI3 MCLK */ +#define SCMI_IMX94_CTRL_SAI4_MCLK 6U /*!< WAKE SAI4 MCLK */ int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val); int scmi_imx_misc_ctrl_set(u32 id, u32 val); diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index 983e1591bbba..0f667bf1d4d9 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -148,11 +148,10 @@ bool qcom_scm_lmh_dcvsh_available(void); int qcom_scm_gpu_init_regs(u32 gpu_req); -int qcom_scm_shm_bridge_enable(void); -int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, +int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags, u64 ipfn_and_s_perm_flags, u64 size_and_flags, u64 ns_vmids, u64 *handle); -int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle); +int qcom_scm_shm_bridge_delete(u64 handle); #ifdef CONFIG_QCOM_QSEECOM diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 6d4dbc196b93..ae48d619c4e0 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -585,7 +585,6 @@ int zynqmp_pm_reset_assert(const u32 reset, int zynqmp_pm_reset_get_status(const u32 reset, u32 *status); unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode); int zynqmp_pm_bootmode_write(u32 ps_mode); -int zynqmp_pm_init_finalize(void); int zynqmp_pm_set_suspend_mode(u32 mode); int zynqmp_pm_request_node(const u32 node, const u32 capabilities, const u32 qos, const enum zynqmp_pm_request_ack ack); @@ -746,11 +745,6 @@ static inline int zynqmp_pm_bootmode_write(u32 ps_mode) return -ENODEV; } -static inline int zynqmp_pm_init_finalize(void) -{ - return -ENODEV; -} - static inline int zynqmp_pm_set_suspend_mode(u32 mode) { return -ENODEV; diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index e4ce1cae03bf..b3b53f8c1b28 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -596,7 +596,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, if (p_size != SIZE_MAX && p_size < size) fortify_panic(func, FORTIFY_WRITE, p_size, size, true); else if (q_size != SIZE_MAX && q_size < size) - fortify_panic(func, 
FORTIFY_READ, p_size, size, true); + fortify_panic(func, FORTIFY_READ, q_size, size, true); /* * Warn when writing beyond destination field size. diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 702099f08929..7964db96e41a 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -94,6 +94,7 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); int register_fprobe_syms(struct fprobe *fp, const char **syms, int num); int unregister_fprobe(struct fprobe *fp); bool fprobe_is_registered(struct fprobe *fp); +int fprobe_count_ips_from_filter(const char *filter, const char *notfilter); #else static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) { @@ -115,6 +116,10 @@ static inline bool fprobe_is_registered(struct fprobe *fp) { return false; } +static inline int fprobe_count_ips_from_filter(const char *filter, const char *notfilter) +{ + return -EOPNOTSUPP; +} #endif /** diff --git a/include/linux/fs.h b/include/linux/fs.h index 040c0036320f..2ec4807d4ea8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -80,7 +80,7 @@ struct fsnotify_mark_connector; struct fsnotify_sb_info; struct fs_context; struct fs_parameter_spec; -struct fileattr; +struct file_kattr; struct iomap_ops; extern void __init inode_init(void); @@ -446,10 +446,10 @@ struct address_space_operations { void (*readahead)(struct readahead_control *); - int (*write_begin)(struct file *, struct address_space *mapping, + int (*write_begin)(const struct kiocb *, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata); - int (*write_end)(struct file *, struct address_space *mapping, + int (*write_end)(const struct kiocb *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata); @@ -839,7 +839,7 @@ static inline void inode_fake_hash(struct inode *inode) } /* - * inode->i_mutex nesting subclasses for the lock validator: + * inode->i_rwsem nesting subclasses for the lock validator: * * 0: the object of the current VFS operation * 1: parent @@ -991,7 +991,7 @@ static inline loff_t i_size_read(const struct inode *inode) /* * NOTE: unlike i_size_read(), i_size_write() does need locking around it - * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount + * (normally i_rwsem), otherwise on 32bit/SMP an update of i_size_seqcount * can be lost, resulting in subsequent i_size_read() calls spinning forever. */ static inline void i_size_write(struct inode *inode, loff_t i_size) @@ -1402,6 +1402,7 @@ struct super_block { char s_sysfs_name[UUID_STRING_LEN + 1]; unsigned int s_max_links; + unsigned int s_d_flags; /* default d_flags for dentries */ /* * The next field is for VFS *only*. No filesystems have any business @@ -1415,7 +1416,7 @@ struct super_block { */ const char *s_subtype; - const struct dentry_operations *s_d_op; /* default d_op for dentries */ + const struct dentry_operations *__s_d_op; /* default d_op for dentries */ struct shrinker *s_shrink; /* per-sb shrinker handle */ @@ -1923,7 +1924,7 @@ static inline void sb_end_intwrite(struct super_block *sb) * freeze protection should be the outermost lock. In particular, we have: * * sb_start_write - * -> i_mutex (write path, truncate, directory ops, ...) + * -> i_rwsem (write path, truncate, directory ops, ...) 
* -> s_umount (freeze_super, thaw_super) */ static inline void sb_start_write(struct super_block *sb) @@ -2006,20 +2007,20 @@ int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *, /** * struct renamedata - contains all information required for renaming * @old_mnt_idmap: idmap of the old mount the inode was found from - * @old_dir: parent of source + * @old_parent: parent of source * @old_dentry: source * @new_mnt_idmap: idmap of the new mount the inode was found from - * @new_dir: parent of destination + * @new_parent: parent of destination * @new_dentry: destination * @delegated_inode: returns an inode needing a delegation break * @flags: rename flags */ struct renamedata { struct mnt_idmap *old_mnt_idmap; - struct inode *old_dir; + struct dentry *old_parent; struct dentry *old_dentry; struct mnt_idmap *new_mnt_idmap; - struct inode *new_dir; + struct dentry *new_parent; struct dentry *new_dentry; struct inode **delegated_inode; unsigned int flags; @@ -2256,13 +2257,13 @@ struct inode_operations { int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); int (*fileattr_set)(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); - int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); + int (*fileattr_get)(struct dentry *dentry, struct file_kattr *fa); struct offset_ctx *(*get_offset_ctx)(struct inode *inode); } ____cacheline_aligned; /* Did the driver provide valid mmap hook configuration? */ -static inline bool file_has_valid_mmap_hooks(struct file *file) +static inline bool can_mmap_file(struct file *file) { bool has_mmap = file->f_op->mmap; bool has_mmap_prepare = file->f_op->mmap_prepare; @@ -2278,7 +2279,7 @@ static inline bool file_has_valid_mmap_hooks(struct file *file) int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma); -static inline int call_mmap(struct file *file, struct vm_area_struct *vma) +static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma) { if (file->f_op->mmap_prepare) return compat_vma_mmap_prepare(file, vma); @@ -2286,8 +2287,7 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma) return file->f_op->mmap(file, vma); } -static inline int __call_mmap_prepare(struct file *file, - struct vm_area_desc *desc) +static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc) { return file->f_op->mmap_prepare(desc); } @@ -2367,6 +2367,15 @@ struct super_operations { struct shrink_control *); long (*free_cached_objects)(struct super_block *, struct shrink_control *); + /* + * If a filesystem can support graceful removal of a device and + * continue read-write operations, implement this callback. + * + * Return 0 if the filesystem can continue read-write. + * Non-zero return value or no such callback means the fs will be shut down + * as usual. + */ + int (*remove_bdev)(struct super_block *sb, struct block_device *bdev); void (*shutdown)(struct super_block *sb); }; @@ -2868,7 +2877,7 @@ struct file *dentry_open_nonotify(const struct path *path, int flags, const struct cred *cred); struct file *dentry_create(const struct path *path, int flags, umode_t mode, const struct cred *cred); -struct path *backing_file_user_path(struct file *f); +struct path *backing_file_user_path(const struct file *f); /* * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file @@ -2880,14 +2889,14 @@ struct path *backing_file_user_path(struct file *f); * by fstat() on that same fd.
*/ /* Get the path to display in /proc/<pid>/maps */ -static inline const struct path *file_user_path(struct file *f) +static inline const struct path *file_user_path(const struct file *f) { if (unlikely(f->f_mode & FMODE_BACKING)) return backing_file_user_path(f); return &f->f_path; } /* Get the inode whose inode number to display in /proc/<pid>/maps */ -static inline const struct inode *file_user_inode(struct file *f) +static inline const struct inode *file_user_inode(const struct file *f) { if (unlikely(f->f_mode & FMODE_BACKING)) return d_inode(backing_file_user_path(f)->dentry); @@ -3268,6 +3277,22 @@ static inline bool is_dot_dotdot(const char *name, size_t len) (len == 1 || (len == 2 && name[1] == '.')); } +/** + * name_contains_dotdot - check if a file name contains ".." path components + * @name: file name to check + * + * Search for ".." surrounded by either '/' or start/end of string. + */ +static inline bool name_contains_dotdot(const char *name) +{ + size_t name_len; + + name_len = strlen(name); + return strcmp(name, "..") == 0 || + strncmp(name, "../", 3) == 0 || + strstr(name, "/../") != NULL || + (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0); +} + #include <linux/err.h> /* needed for stackable file system support */ @@ -3396,8 +3421,10 @@ extern void inode_add_lru(struct inode *inode); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); -extern int generic_file_mmap(struct file *, struct vm_area_struct *); -extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); +int generic_file_mmap(struct file *, struct vm_area_struct *); +int generic_file_mmap_prepare(struct vm_area_desc *desc); +int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); +int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); int generic_write_checks_count(struct kiocb *iocb, loff_t *count); extern int generic_write_check_limits(struct file *file, loff_t pos, @@ -3599,19 +3626,21 @@ extern int simple_rename(struct mnt_idmap *, struct inode *, unsigned int); extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); +extern void locked_recursive_removal(struct dentry *, + void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); -extern int simple_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata); +extern int simple_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata); extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name, const struct inode *context_inode); extern int simple_nosetlease(struct file *, int, struct file_lease **, void **); -extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); @@ -3625,6 +3654,7 @@ extern int simple_fill_super(struct super_block *, unsigned long, const struct tree_descr *); extern int
simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); extern void simple_release_fs(struct vfsmount **mount, int *count); +struct dentry *simple_start_creating(struct dentry *, const char *); extern ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index a19e4bd32e4d..7773eb870039 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -200,7 +200,7 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, */ #define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__) #define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__) -#define infofc(p, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__) +#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__) /** * warnf - Store supplementary warning message diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h index 2b1f74b24070..0cc2fa283305 100644 --- a/include/linux/fs_stack.h +++ b/include/linux/fs_stack.h @@ -3,7 +3,7 @@ #define _LINUX_FS_STACK_H /* This file defines generic functions used primarily by stackable - * filesystems; none of these functions require i_mutex to be held. + * filesystems; none of these functions require i_rwsem to be held. */ #include <linux/fs.h> diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 783b48dedb72..baf200ab5c77 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -8,8 +8,7 @@ struct fs_struct { int users; - spinlock_t lock; - seqcount_spinlock_t seq; + seqlock_t seq; int umask; int in_exec; struct path root, pwd; @@ -26,18 +25,18 @@ extern int unshare_fs_struct(void); static inline void get_fs_root(struct fs_struct *fs, struct path *root) { - spin_lock(&fs->lock); + read_seqlock_excl(&fs->seq); *root = fs->root; path_get(root); - spin_unlock(&fs->lock); + read_sequnlock_excl(&fs->seq); } static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) { - spin_lock(&fs->lock); + read_seqlock_excl(&fs->seq); *pwd = fs->pwd; path_get(pwd); - spin_unlock(&fs->lock); + read_sequnlock_excl(&fs->seq); } extern bool current_chrooted(void); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 56fad33043d5..8d0e3ad89b94 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -314,7 +314,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio, size_t len, size_t offs, gfp_t gfp_flags); int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, - u64 lblk_num, gfp_t gfp_flags); + u64 lblk_num); int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len, size_t offs); @@ -487,8 +487,7 @@ static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio, static inline int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num, - gfp_t gfp_flags) + unsigned int offs, u64 lblk_num) { return -EOPNOTSUPP; } diff --git a/include/linux/fsi.h b/include/linux/fsi.h index 8c5eef808788..adea1b432f2d 100644 --- a/include/linux/fsi.h +++ b/include/linux/fsi.h @@ -68,7 +68,7 @@ extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr, extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr, const void *val, size_t size); -extern struct bus_type fsi_bus_type; +extern const struct bus_type fsi_bus_type; extern const struct 
device_type fsi_cdev_type; enum fsi_dev_type { diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 5d231ce8709b..49f20c2f99bf 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -118,7 +118,6 @@ struct fsl_usb2_platform_data { #define FSL_USB2_PORT0_ENABLED 0x00000001 #define FSL_USB2_PORT1_ENABLED 0x00000002 -#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0) struct spi_device; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index b672ca15f265..7ded7df6e9b5 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1108,7 +1108,7 @@ static __always_inline unsigned long get_lock_parent_ip(void) # define trace_preempt_off(a0, a1) do { } while (0) #endif -#ifdef CONFIG_FTRACE_MCOUNT_RECORD +#ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_init(void); #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY #define FTRACE_CALLSITE_SECTION "__patchable_function_entries" diff --git a/include/linux/futex.h b/include/linux/futex.h index b37193653e6b..9e9750f04980 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -85,18 +85,12 @@ int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) #ifdef CONFIG_FUTEX_PRIVATE_HASH int futex_hash_allocate_default(void); void futex_hash_free(struct mm_struct *mm); - -static inline void futex_mm_init(struct mm_struct *mm) -{ - RCU_INIT_POINTER(mm->futex_phash, NULL); - mm->futex_phash_new = NULL; - mutex_init(&mm->futex_hash_lock); -} +int futex_mm_init(struct mm_struct *mm); #else /* !CONFIG_FUTEX_PRIVATE_HASH */ static inline int futex_hash_allocate_default(void) { return 0; } static inline void futex_hash_free(struct mm_struct *mm) { } -static inline void futex_mm_init(struct mm_struct *mm) { } +static inline int futex_mm_init(struct mm_struct *mm) { return 0; } #endif /* CONFIG_FUTEX_PRIVATE_HASH */ #else /* !CONFIG_FUTEX */ @@ -118,8 +112,8 @@ static inline int futex_hash_allocate_default(void) { return 0; } static inline void futex_hash_free(struct mm_struct *mm) { } -static inline void futex_mm_init(struct mm_struct *mm) { } +static inline int futex_mm_init(struct mm_struct *mm) { return 0; } #endif diff --git a/include/linux/gpio.h b/include/linux/gpio.h index c1ec62c11ed3..8f85ddb26429 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -13,6 +13,11 @@ #define __LINUX_GPIO_H #include <linux/types.h> +#ifdef CONFIG_GPIOLIB +#include <linux/gpio/consumer.h> +#endif + +#ifdef CONFIG_GPIOLIB_LEGACY struct device; @@ -21,22 +26,7 @@ struct device; #define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1)) #define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1)) -/** - * struct gpio - a structure describing a GPIO with configuration - * @gpio: the GPIO number - * @flags: GPIO configuration as specified by GPIOF_* - * @label: a literal description string of this GPIO - */ -struct gpio { - unsigned gpio; - unsigned long flags; - const char *label; -}; - #ifdef CONFIG_GPIOLIB - -#include <linux/gpio/consumer.h> - /* * "valid" GPIO numbers are nonnegative and may be passed to * setup routines like gpio_request(). Only some valid numbers @@ -57,19 +47,6 @@ static inline bool gpio_is_valid(int number) * extra memory (for code and for per-GPIO table entries). */ -/* - * At the end we want all GPIOs to be dynamically allocated from 0. - * However, some legacy drivers still perform fixed allocation.
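With devm_gpio_request() removed and the remaining numberspace helpers gated behind CONFIG_GPIOLIB_LEGACY, new consumers are expected to use the descriptor API that <linux/gpio/consumer.h> now provides for all of CONFIG_GPIOLIB. A hedged sketch of the replacement pattern; the device and the "reset" function name are illustrative:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_claim_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Look up the GPIO by function and direction, not by global number. */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 1);
	return 0;
}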
- * Until they are all fixed, leave 0-512 space for them. - */ -#define GPIO_DYNAMIC_BASE 512 -/* - * Define the maximum of the possible GPIO in the global numberspace. - * While the GPIO base and numbers are positive, we limit it with signed - * maximum as a lot of code is using negative values for special cases. - */ -#define GPIO_DYNAMIC_MAX INT_MAX - /* Always use the library code for GPIO management calls, * or when sleeping may be involved. */ @@ -110,7 +87,6 @@ static inline int gpio_to_irq(unsigned gpio) int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); -int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label); @@ -188,13 +164,6 @@ static inline int gpio_to_irq(unsigned gpio) return -EINVAL; } -static inline int devm_gpio_request(struct device *dev, unsigned gpio, - const char *label) -{ - WARN_ON(1); - return -EINVAL; -} - static inline int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label) { @@ -203,5 +172,5 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio, } #endif /* ! CONFIG_GPIOLIB */ - +#endif /* CONFIG_GPIOLIB_LEGACY */ #endif /* __LINUX_GPIO_H */ diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index f0b1982da0cc..00df68c51405 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -181,7 +181,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, enum gpiod_flags flags, const char *label); -bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other); +bool gpiod_is_equal(const struct gpio_desc *desc, + const struct gpio_desc *other); #else /* CONFIG_GPIOLIB */ @@ -551,7 +552,7 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, } static inline bool -gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other) +gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other) { WARN_ON(desc || other); return false; diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b53233051bee..4b984e8f8fcd 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -718,12 +718,6 @@ const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc); /* get driver data */ void *gpiochip_get_data(struct gpio_chip *gc); -struct bgpio_pdata { - const char *label; - int base; - int ngpio; -}; - #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, @@ -750,6 +744,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ #define BGPIOF_NO_SET_ON_INPUT BIT(6) #define BGPIOF_PINCTRL_BACKEND BIT(7) /* Call pinctrl direction setters */ +#define BGPIOF_NO_INPUT BIT(8) /* only output */ #ifdef CONFIG_GPIOLIB_IRQCHIP int gpiochip_irqchip_add_domain(struct gpio_chip *gc, diff --git a/include/linux/gpio/generic.h b/include/linux/gpio/generic.h new file mode 100644 index 000000000000..b511acd58ab0 --- /dev/null +++ b/include/linux/gpio/generic.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_GPIO_GENERIC_H +#define __LINUX_GPIO_GENERIC_H + +#include <linux/cleanup.h> +#include <linux/gpio/driver.h> +#include <linux/spinlock.h> + +struct device; + +/** + * struct gpio_generic_chip_config - Generic GPIO chip configuration data + * @dev: Parent device of the new GPIO chip (compulsory). 
+ * @sz: Size (width) of the MMIO registers in bytes, typically 1, 2 or 4. + * @dat: MMIO address for the register to READ the value of the GPIO lines, it + * is expected that a 1 in the corresponding bit in this register means + * the line is asserted. + * @set: MMIO address for the register to SET the value of the GPIO lines, it + * is expected that we write the line with 1 in this register to drive + * the GPIO line high. + * @clr: MMIO address for the register to CLEAR the value of the GPIO lines, + * it is expected that we write the line with 1 in this register to + * drive the GPIO line low. It is allowed to leave this address as NULL, + * in that case the SET register will be assumed to also clear the GPIO + * lines, by actively writing the line with 0. + * @dirout: MMIO address for the register to set the line as OUTPUT. It is + * assumed that setting a line to 1 in this register will turn that + * line into an output line. Conversely, setting the line to 0 will + * turn that line into an input. + * @dirin: MMIO address for the register to set this line as INPUT. It is + * assumed that setting a line to 1 in this register will turn that + * line into an input line. Conversely, setting the line to 0 will + * turn that line into an output. + * @flags: Different flags that will affect the behaviour of the device, such + * as endianness etc. + */ +struct gpio_generic_chip_config { + struct device *dev; + unsigned long sz; + void __iomem *dat; + void __iomem *set; + void __iomem *clr; + void __iomem *dirout; + void __iomem *dirin; + unsigned long flags; +}; + +/** + * struct gpio_generic_chip - Generic GPIO chip implementation. + * @gc: The underlying struct gpio_chip object, implementing low-level GPIO + * chip routines. + */ +struct gpio_generic_chip { + struct gpio_chip gc; +}; + +/** + * gpio_generic_chip_init() - Initialize a generic GPIO chip. + * @chip: Generic GPIO chip to set up. + * @cfg: Generic GPIO chip configuration. + * + * Returns 0 on success, negative error number on failure. + */ +static inline int +gpio_generic_chip_init(struct gpio_generic_chip *chip, + const struct gpio_generic_chip_config *cfg) +{ + return bgpio_init(&chip->gc, cfg->dev, cfg->sz, cfg->dat, cfg->set, + cfg->clr, cfg->dirout, cfg->dirin, cfg->flags); +} + +/** + * gpio_generic_chip_set() - Set the GPIO line value of the generic GPIO chip. + * @chip: Generic GPIO chip to use. + * @offset: Hardware offset of the line to set. + * @value: New GPIO line value. + * + * Some modules using the generic GPIO chip need to set line values in their + * direction setters but they don't have access to the gpio-mmio symbols so + * they use the function pointer in struct gpio_chip directly. This is not + * optimal and can lead to crashes at run-time in some instances. This wrapper + * provides a safe interface for users. + * + * Returns: 0 on success, negative error number on failure.
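A probe-time sketch of how a driver might feed this configuration to gpio_generic_chip_init(); the platform device, register offsets and 32-bit width are illustrative assumptions, and leaving .clr NULL relies on the SET register also clearing lines, as documented above:

#include <linux/err.h>
#include <linux/gpio/generic.h>
#include <linux/platform_device.h>

static int example_gpio_probe(struct platform_device *pdev)
{
	struct gpio_generic_chip_config cfg = { .dev = &pdev->dev, .sz = 4 };
	struct gpio_generic_chip *chip;
	void __iomem *base;
	int ret;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	cfg.dat = base + 0x0;		/* hypothetical DATA register */
	cfg.set = base + 0x4;		/* hypothetical SET register, also clears */
	cfg.dirout = base + 0x8;	/* hypothetical direction register */

	ret = gpio_generic_chip_init(chip, &cfg);
	if (ret)
		return ret;

	return devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
}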
+ */ +static inline int +gpio_generic_chip_set(struct gpio_generic_chip *chip, unsigned int offset, + int value) +{ + if (WARN_ON(!chip->gc.set_rv)) + return -EOPNOTSUPP; + + return chip->gc.set_rv(&chip->gc, offset, value); +} + +#define gpio_generic_chip_lock(gen_gc) \ + raw_spin_lock(&(gen_gc)->gc.bgpio_lock) + +#define gpio_generic_chip_unlock(gen_gc) \ + raw_spin_unlock(&(gen_gc)->gc.bgpio_lock) + +#define gpio_generic_chip_lock_irqsave(gen_gc, flags) \ + raw_spin_lock_irqsave(&(gen_gc)->gc.bgpio_lock, flags) + +#define gpio_generic_chip_unlock_irqrestore(gen_gc, flags) \ + raw_spin_unlock_irqrestore(&(gen_gc)->gc.bgpio_lock, flags) + +DEFINE_LOCK_GUARD_1(gpio_generic_lock, + struct gpio_generic_chip, + gpio_generic_chip_lock(_T->lock), + gpio_generic_chip_unlock(_T->lock)) + +DEFINE_LOCK_GUARD_1(gpio_generic_lock_irqsave, + struct gpio_generic_chip, + gpio_generic_chip_lock_irqsave(_T->lock, _T->flags), + gpio_generic_chip_unlock_irqrestore(_T->lock, _T->flags), + unsigned long flags) + +#endif /* __LINUX_GPIO_GENERIC_H */ diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h index e42807ec61f6..9d4e5ab6c314 100644 --- a/include/linux/group_cpus.h +++ b/include/linux/group_cpus.h @@ -9,6 +9,6 @@ #include <linux/kernel.h> #include <linux/cpu.h> -struct cpumask *group_cpus_evenly(unsigned int numgrps); +struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks); #endif diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 996be3c2cff0..e5a2096e022e 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2024 Intel Corporation + * Copyright (c) 2018 - 2025 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -2825,11 +2825,12 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 -#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 -#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 -#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 -#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3 -#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 +#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD 4 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 8 /** * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field @@ -2847,13 +2848,31 @@ struct ieee80211_he_6ghz_oper { #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 #define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 -#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 +#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x78 u8 control; u8 ccfs0; u8 ccfs1; u8 minrate; } __packed; +/** + * enum ieee80211_reg_conn_bits - represents Regulatory connectivity field bits. + * + * This enumeration defines bit flags used to represent regulatory connectivity + * field bits. + * + * @IEEE80211_REG_CONN_LPI_VALID: Indicates whether the LPI bit is valid. + * @IEEE80211_REG_CONN_LPI_VALUE: Represents the value of the LPI bit. + * @IEEE80211_REG_CONN_SP_VALID: Indicates whether the SP bit is valid. 
+ * @IEEE80211_REG_CONN_SP_VALUE: Represents the value of the SP bit. + */ +enum ieee80211_reg_conn_bits { + IEEE80211_REG_CONN_LPI_VALID = BIT(0), + IEEE80211_REG_CONN_LPI_VALUE = BIT(1), + IEEE80211_REG_CONN_SP_VALID = BIT(2), + IEEE80211_REG_CONN_SP_VALUE = BIT(3), +}; + /* transmit power interpretation type of transmit power envelope element */ enum ieee80211_tx_power_intrpt_type { IEEE80211_TPE_LOCAL_EIRP, @@ -3835,6 +3854,7 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, WLAN_EID_EXT_FILS_NONCE = 13, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, + WLAN_EID_EXT_DH_PARAMETER = 32, WLAN_EID_EXT_HE_CAPABILITY = 35, WLAN_EID_EXT_HE_OPERATION = 36, WLAN_EID_EXT_UORA = 37, @@ -3858,6 +3878,8 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_EHT_CAPABILITY = 108, WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109, WLAN_EID_EXT_BANDWIDTH_INDICATION = 135, + WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136, + WLAN_EID_EXT_NON_AP_STA_REG_CON = 137, }; /* Action category code */ @@ -3995,6 +4017,16 @@ enum ieee80211_s1g_actioncode { WLAN_S1G_TWT_INFORMATION = 11, }; +/* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */ +enum ieee80211_radio_measurement_actioncode { + WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0, + WLAN_RM_ACTION_RADIO_MEASUREMENT_REPORT = 1, + WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST = 2, + WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT = 3, + WLAN_RM_ACTION_NEIGHBOR_REPORT_REQUEST = 4, + WLAN_RM_ACTION_NEIGHBOR_REPORT_RESPONSE = 5, +}; + #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 @@ -5344,6 +5376,13 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data) return get_unaligned_le16(common); } +/* Defined in Figure 9-1074t in P802.11be_D7.0 */ +#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE 0x0001 +#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_RECO_MAX_LINKS_MASK 0x001e +#define IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE 0x0020 +#define IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK 0x0040 +#define IEEE80211_EHT_ML_EXT_MLD_CAPA_BTM_MLD_RECO_MULTI_AP 0x0080 + /** * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities * and operations. diff --git a/include/linux/if_team.h b/include/linux/if_team.h index cdc684e04a2f..ce97d891cf72 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -191,8 +191,6 @@ struct team { const struct header_ops *header_ops_cache; - struct mutex lock; /* used for overall locking, e.g. 
port lists write */ - /* * List of enabled ports and their count */ @@ -223,7 +221,6 @@ struct team { atomic_t count_pending; struct delayed_work dw; } mcast_rejoin; - struct lock_class_key team_lock_key; long mode_priv[TEAM_MODE_PRIV_LONGS]; }; diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 043d442994b0..80166eb62f41 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -19,11 +19,6 @@ struct tun_msg_ctl { void *ptr; }; -struct tun_xdp_hdr { - int buflen; - struct virtio_net_hdr gso; -}; - #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 38456b42cdb5..15e01935d3fa 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -static inline bool is_vlan_dev(const struct net_device *dev) -{ - return dev->priv_flags & IFF_802_1Q_VLAN; -} - #define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all) #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) @@ -136,7 +131,7 @@ struct vlan_pcpu_stats { u32 tx_dropped; }; -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q) extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id); @@ -200,6 +195,11 @@ struct vlan_dev_priv { #endif }; +static inline bool is_vlan_dev(const struct net_device *dev) +{ + return dev->priv_flags & IFF_802_1Q_VLAN; +} + static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) { return netdev_priv(dev); @@ -237,6 +237,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, extern bool vlan_uses_dev(const struct net_device *dev); #else +static inline bool is_vlan_dev(const struct net_device *dev) +{ + return false; +} + static inline struct net_device * __vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id) @@ -254,19 +259,19 @@ vlan_for_each(struct net_device *dev, static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { - BUG(); + WARN_ON_ONCE(1); return NULL; } static inline u16 vlan_dev_vlan_id(const struct net_device *dev) { - BUG(); + WARN_ON_ONCE(1); return 0; } static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev) { - BUG(); + WARN_ON_ONCE(1); return 0; } diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index f242b285081b..6e70a412e218 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -31,6 +31,8 @@ struct ad_sigma_delta; struct device; struct gpio_desc; struct iio_dev; +struct spi_offload; +struct spi_offload_trigger; /** * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options @@ -47,6 +49,10 @@ struct iio_dev; * @has_registers: true if the device has writable and readable registers, false * if there is just one read-only sample data shift register. * @has_named_irqs: Set to true if there is more than one IRQ line. + * @supports_spi_offload: Set to true if the driver supports SPI offload. 
Often + * special considerations are needed for scan_type and other channel + * info, so individual drivers have to set this to let the core + * code know that it can use SPI offload if it is available. * @addr_shift: Shift of the register address in the communications register. * @read_mask: Mask for the communications register having the read bit set. * @status_ch_mask: Mask for the channel number stored in status register. @@ -65,6 +71,7 @@ struct ad_sigma_delta_info { int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample); bool has_registers; bool has_named_irqs; + bool supports_spi_offload; unsigned int addr_shift; unsigned int read_mask; unsigned int status_ch_mask; @@ -94,7 +101,7 @@ struct ad_sigma_delta { bool bus_locked; bool keep_cs_asserted; - uint8_t comm; + u8 comm; const struct ad_sigma_delta_info *info; unsigned int active_slots; @@ -105,7 +112,11 @@ struct ad_sigma_delta { bool status_appended; /* map slots to channels in order to know what to expect from devices */ unsigned int *slots; - uint8_t *samples_buf; + struct spi_message sample_msg; + struct spi_transfer sample_xfer[2]; + u8 *samples_buf; + struct spi_offload *offload; + struct spi_offload_trigger *offload_trigger; /* * DMA (thus cache coherency maintenance) requires the @@ -114,10 +125,16 @@ struct ad_sigma_delta { * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp, * rounded to 16 bytes to take into account padding. */ - uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN); - uint8_t rx_buf[16] __aligned(8); + u8 tx_buf[4] __aligned(IIO_DMA_MINALIGN); + u8 rx_buf[16] __aligned(8); + u8 sample_addr; }; +static inline bool ad_sigma_delta_has_spi_offload(struct ad_sigma_delta *sd) +{ + return sd->offload != NULL; +} + static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd, unsigned int channel) { @@ -177,7 +194,7 @@ static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd, return 0; } -void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm); +void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, u8 comm); int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, unsigned int size, unsigned int val); int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index e59d909cb659..7f815f3fed6a 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -76,6 +76,14 @@ enum iio_backend_interface_type { IIO_BACKEND_INTERFACE_MAX }; +enum iio_backend_filter_type { + IIO_BACKEND_FILTER_TYPE_DISABLED, + IIO_BACKEND_FILTER_TYPE_SINC1, + IIO_BACKEND_FILTER_TYPE_SINC5, + IIO_BACKEND_FILTER_TYPE_SINC5_PLUS_COMP, + IIO_BACKEND_FILTER_TYPE_MAX +}; + /** * struct iio_backend_ops - operations structure for an iio_backend * @enable: Enable backend. @@ -101,6 +109,9 @@ enum iio_backend_interface_type { * @read_raw: Read a channel attribute from a backend device * @debugfs_print_chan_status: Print channel status into a buffer. * @debugfs_reg_access: Read or write register value of backend. + * @filter_type_set: Set filter type. + * @interface_data_align: Perform the data alignment process. + * @num_lanes_set: Set the number of lanes enabled. * @ddr_enable: Enable interface DDR (Double Data Rate) mode. * @ddr_disable: Disable interface DDR (Double Data Rate) mode. * @data_stream_enable: Enable data stream. 
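The three new ops give front-end drivers a way to configure the backend's digital interface before streaming starts. A hedged bring-up sketch using the wrapper functions declared just below; the two-lane configuration, the SINC5 filter choice and the 1000 us alignment timeout are illustrative:

static int example_backend_setup(struct iio_backend *back)
{
	int ret;

	ret = iio_backend_num_lanes_set(back, 2);
	if (ret)
		return ret;

	ret = iio_backend_filter_type_set(back, IIO_BACKEND_FILTER_TYPE_SINC5);
	if (ret)
		return ret;

	/* Ask the backend to align the data interface, bounded by a timeout. */
	return iio_backend_interface_data_align(back, 1000);
}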
@@ -144,7 +155,7 @@ struct iio_backend_ops { enum iio_backend_interface_type *type); int (*data_size_set)(struct iio_backend *back, unsigned int size); int (*oversampling_ratio_set)(struct iio_backend *back, - unsigned int ratio); + unsigned int chan, unsigned int ratio); int (*read_raw)(struct iio_backend *back, struct iio_chan_spec const *chan, int *val, int *val2, long mask); @@ -153,6 +164,10 @@ struct iio_backend_ops { size_t len); int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg, unsigned int writeval, unsigned int *readval); + int (*filter_type_set)(struct iio_backend *back, + enum iio_backend_filter_type type); + int (*interface_data_align)(struct iio_backend *back, u32 timeout_us); + int (*num_lanes_set)(struct iio_backend *back, unsigned int num_lanes); int (*ddr_enable)(struct iio_backend *back); int (*ddr_disable)(struct iio_backend *back); int (*data_stream_enable)(struct iio_backend *back); @@ -195,6 +210,10 @@ int iio_backend_data_sample_trigger(struct iio_backend *back, int devm_iio_backend_request_buffer(struct device *dev, struct iio_backend *back, struct iio_dev *indio_dev); +int iio_backend_filter_type_set(struct iio_backend *back, + enum iio_backend_filter_type type); +int iio_backend_interface_data_align(struct iio_backend *back, u32 timeout_us); +int iio_backend_num_lanes_set(struct iio_backend *back, unsigned int num_lanes); int iio_backend_ddr_enable(struct iio_backend *back); int iio_backend_ddr_disable(struct iio_backend *back); int iio_backend_data_stream_enable(struct iio_backend *back); @@ -209,6 +228,7 @@ int iio_backend_interface_type_get(struct iio_backend *back, enum iio_backend_interface_type *type); int iio_backend_data_size_set(struct iio_backend *back, unsigned int size); int iio_backend_oversampling_ratio_set(struct iio_backend *back, + unsigned int chan, unsigned int ratio); int iio_backend_read_raw(struct iio_backend *back, struct iio_chan_spec const *chan, int *val, int *val2, diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index e72167b96d27..bb966abcde53 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -126,5 +126,6 @@ extern const struct dev_pm_ops cros_ec_sensors_pm_ops; /* List of extended channel specification for all sensors. */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; +extern const struct iio_chan_spec_ext_info cros_ec_sensors_limited_info[]; #endif /* __CROS_EC_SENSORS_CORE_H */ diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index d89982c98368..ad2761efcc83 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -69,6 +69,7 @@ enum iio_chan_info_enum { IIO_CHAN_INFO_CALIBAMBIENT, IIO_CHAN_INFO_ZEROPOINT, IIO_CHAN_INFO_TROUGH, + IIO_CHAN_INFO_CONVDELAY, }; #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/in6.h b/include/linux/in6.h index 0777a21cbf86..403f926d33d8 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h @@ -18,6 +18,13 @@ #include <uapi/linux/in6.h> +/* Large enough to hold both sockaddr_in and sockaddr_in6. 
*/ +struct sockaddr_inet { + unsigned short sa_family; + char sa_data[sizeof(struct sockaddr_in6) - + sizeof(unsigned short)]; +}; + /* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553 * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined * in network byte order, not in host byte order as are the IPv4 equivalents diff --git a/include/linux/init.h b/include/linux/init.h index ee1309473bc6..a60d32d227ee 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -49,7 +49,9 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline +#define __init __section(".init.text") __cold __latent_entropy \ + __noinitretpoline \ + __no_kstack_erase #define __initdata __section(".init.data") #define __initconst __section(".init.rodata") #define __exitdata __section(".exit.data") diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h new file mode 100644 index 000000000000..625d46a6b96e --- /dev/null +++ b/include/linux/intel_dg_nvm_aux.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2019-2025, Intel Corporation. All rights reserved. + */ + +#ifndef __INTEL_DG_NVM_AUX_H__ +#define __INTEL_DG_NVM_AUX_H__ + +#include <linux/auxiliary_bus.h> +#include <linux/container_of.h> +#include <linux/ioport.h> +#include <linux/types.h> + +#define INTEL_DG_NVM_REGIONS 13 + +struct intel_dg_nvm_region { + const char *name; +}; + +struct intel_dg_nvm_dev { + struct auxiliary_device aux_dev; + bool writable_override; + bool non_posted_erase; + struct resource bar; + struct resource bar2; + const struct intel_dg_nvm_region *regions; +}; + +#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \ + container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev) + +#endif /* __INTEL_DG_NVM_AUX_H__ */ diff --git a/include/linux/intel_pmt_features.h b/include/linux/intel_pmt_features.h new file mode 100644 index 000000000000..53573a4a49b7 --- /dev/null +++ b/include/linux/intel_pmt_features.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FEATURES_H +#define _FEATURES_H + +#include <linux/bits.h> +#include <linux/types.h> + +/* Common masks */ +#define PMT_CAP_TELEM BIT(0) +#define PMT_CAP_WATCHER BIT(1) +#define PMT_CAP_CRASHLOG BIT(2) +#define PMT_CAP_STREAMING BIT(3) +#define PMT_CAP_THRESHOLD BIT(4) +#define PMT_CAP_WINDOW BIT(5) +#define PMT_CAP_CONFIG BIT(6) +#define PMT_CAP_TRACING BIT(7) +#define PMT_CAP_INBAND BIT(8) +#define PMT_CAP_OOB BIT(9) +#define PMT_CAP_SECURED_CHAN BIT(10) + +#define PMT_CAP_PMT_SP BIT(11) +#define PMT_CAP_PMT_SP_POLICY GENMASK(17, 12) + +/* Per Core Performance Telemetry (PCPT) specific masks */ +#define PMT_CAP_PCPT_CORE_PERF BIT(18) +#define PMT_CAP_PCPT_CORE_C0_RES BIT(19) +#define PMT_CAP_PCPT_CORE_ACTIVITY BIT(20) +#define PMT_CAP_PCPT_CACHE_PERF BIT(21) +#define PMT_CAP_PCPT_QUALITY_TELEM BIT(22) + +/* Per Core Environmental Telemetry (PCET) specific masks */ +#define PMT_CAP_PCET_WORKPOINT_HIST BIT(18) +#define PMT_CAP_PCET_CORE_CURR_TEMP BIT(19) +#define PMT_CAP_PCET_CORE_INST_RES BIT(20) +#define PMT_CAP_PCET_QUALITY_TELEM BIT(21) /* Same as PMT_CAP_PCPT */ +#define PMT_CAP_PCET_CORE_CDYN_LVL BIT(22) +#define PMT_CAP_PCET_CORE_STRESS_LVL BIT(23) +#define PMT_CAP_PCET_CORE_DAS BIT(24) +#define PMT_CAP_PCET_FIVR_HEALTH BIT(25) +#define PMT_CAP_PCET_ENERGY BIT(26) +#define PMT_CAP_PCET_PEM_STATUS BIT(27) +#define PMT_CAP_PCET_CORE_C_STATE BIT(28) 
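A capability word combines the common masks above with feature-specific bits from bit 18 upward, so the same bit position means different things for PCPT and PCET; the caller must know which feature id the word describes. A small sketch under that assumption:

static bool example_pcet_reports_energy(u32 caps)
{
	/* Common telemetry bit first, then the PCET-specific energy bit. */
	return (caps & PMT_CAP_TELEM) && (caps & PMT_CAP_PCET_ENERGY);
}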
+ +/* Per RMID Performance Telemetry specific masks */ +#define PMT_CAP_RMID_CORES_PERF BIT(18) +#define PMT_CAP_RMID_CACHE_PERF BIT(19) +#define PMT_CAP_RMID_PERF_QUAL BIT(20) + +/* Accelerator Telemetry specific masks */ +#define PMT_CAP_ACCEL_CPM_TELEM BIT(18) +#define PMT_CAP_ACCEL_TIP_TELEM BIT(19) + +/* Uncore Telemetry specific masks */ +#define PMT_CAP_UNCORE_IO_CA_TELEM BIT(18) +#define PMT_CAP_UNCORE_RMID_TELEM BIT(19) +#define PMT_CAP_UNCORE_D2D_ULA_TELEM BIT(20) +#define PMT_CAP_UNCORE_PKGC_TELEM BIT(21) + +/* Crash Log specific masks */ +#define PMT_CAP_CRASHLOG_MAN_TRIG BIT(11) +#define PMT_CAP_CRASHLOG_CORE BIT(12) +#define PMT_CAP_CRASHLOG_UNCORE BIT(13) +#define PMT_CAP_CRASHLOG_TOR BIT(14) +#define PMT_CAP_CRASHLOG_S3M BIT(15) +#define PMT_CAP_CRASHLOG_PERSISTENCY BIT(16) +#define PMT_CAP_CRASHLOG_CLIP_GPIO BIT(17) +#define PMT_CAP_CRASHLOG_PRE_RESET BIT(18) +#define PMT_CAP_CRASHLOG_POST_RESET BIT(19) + +/* PeTe Log specific masks */ +#define PMT_CAP_PETE_MAN_TRIG BIT(11) +#define PMT_CAP_PETE_ENCRYPTION BIT(12) +#define PMT_CAP_PETE_PERSISTENCY BIT(13) +#define PMT_CAP_PETE_REQ_TOKENS BIT(14) +#define PMT_CAP_PETE_PROD_ENABLED BIT(15) +#define PMT_CAP_PETE_DEBUG_ENABLED BIT(16) + +/* TPMI control specific masks */ +#define PMT_CAP_TPMI_MAILBOX BIT(11) +#define PMT_CAP_TPMI_LOCK BIT(12) + +/* Tracing specific masks */ +#define PMT_CAP_TRACE_SRAR BIT(11) +#define PMT_CAP_TRACE_CORRECTABLE BIT(12) +#define PMT_CAP_TRACE_MCTP BIT(13) +#define PMT_CAP_TRACE_MRT BIT(14) + +/* Per RMID Energy Telemetry specific masks */ +#define PMT_CAP_RMID_ENERGY BIT(18) +#define PMT_CAP_RMID_ACTIVITY BIT(19) +#define PMT_CAP_RMID_ENERGY_QUAL BIT(20) + +enum pmt_feature_id { + FEATURE_INVALID = 0x0, + FEATURE_PER_CORE_PERF_TELEM = 0x1, + FEATURE_PER_CORE_ENV_TELEM = 0x2, + FEATURE_PER_RMID_PERF_TELEM = 0x3, + FEATURE_ACCEL_TELEM = 0x4, + FEATURE_UNCORE_TELEM = 0x5, + FEATURE_CRASH_LOG = 0x6, + FEATURE_PETE_LOG = 0x7, + FEATURE_TPMI_CTRL = 0x8, + FEATURE_RESERVED = 0x9, + FEATURE_TRACING = 0xA, + FEATURE_PER_RMID_ENERGY_TELEM = 0xB, + FEATURE_MAX = 0xB, +}; + +enum feature_layout { + LAYOUT_RMID, + LAYOUT_WATCHER, + LAYOUT_COMMAND, + LAYOUT_CAPS_ONLY, +}; + +struct pmt_cap { + u32 mask; + const char *name; +}; + +extern const char * const pmt_feature_names[]; +extern enum feature_layout feature_layout[]; +extern struct pmt_cap pmt_cap_common[]; +extern struct pmt_cap pmt_cap_pcpt[]; +extern struct pmt_cap *pmt_caps_pcpt[]; +extern struct pmt_cap pmt_cap_pcet[]; +extern struct pmt_cap *pmt_caps_pcet[]; +extern struct pmt_cap pmt_cap_rmid_perf[]; +extern struct pmt_cap *pmt_caps_rmid_perf[]; +extern struct pmt_cap pmt_cap_accel[]; +extern struct pmt_cap *pmt_caps_accel[]; +extern struct pmt_cap pmt_cap_uncore[]; +extern struct pmt_cap *pmt_caps_uncore[]; +extern struct pmt_cap pmt_cap_crashlog[]; +extern struct pmt_cap *pmt_caps_crashlog[]; +extern struct pmt_cap pmt_cap_pete[]; +extern struct pmt_cap *pmt_caps_pete[]; +extern struct pmt_cap pmt_cap_tpmi[]; +extern struct pmt_cap *pmt_caps_tpmi[]; +extern struct pmt_cap pmt_cap_s3m[]; +extern struct pmt_cap *pmt_caps_s3m[]; +extern struct pmt_cap pmt_cap_tracing[]; +extern struct pmt_cap *pmt_caps_tracing[]; +extern struct pmt_cap pmt_cap_rmid_energy[]; +extern struct pmt_cap *pmt_caps_rmid_energy[]; + +static inline bool pmt_feature_id_is_valid(enum pmt_feature_id id) +{ + if (id > FEATURE_MAX) + return false; + + if (id == FEATURE_INVALID || id == FEATURE_RESERVED) + return false; + + return true; +} +#endif diff --git 
a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index ff480b47ae64..94c06bf214fb 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -8,6 +8,8 @@ #include <linux/bitfield.h> +struct oobmsm_plat_info; + #define TPMI_VERSION_INVALID 0xff #define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val) #define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val) @@ -26,30 +28,7 @@ enum intel_tpmi_id { TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ }; -/** - * struct intel_tpmi_plat_info - Platform information for a TPMI device instance - * @cdie_mask: Mask of all compute dies in the partition - * @package_id: CPU Package id - * @partition: Package partition id when multiple VSEC PCI devices per package - * @segment: PCI segment ID - * @bus_number: PCI bus number - * @device_number: PCI device number - * @function_number: PCI function number - * - * Structure to store platform data for a TPMI device instance. This - * struct is used to return data via tpmi_get_platform_data(). - */ -struct intel_tpmi_plat_info { - u16 cdie_mask; - u8 package_id; - u8 partition; - u8 segment; - u8 bus_number; - u8 device_number; - u8 function_number; -}; - -struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev); +struct oobmsm_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev); struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index); int tpmi_get_resource_count(struct auxiliary_device *auxdev); int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked, diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h index bc95821f1bfb..53f6fe88e369 100644 --- a/include/linux/intel_vsec.h +++ b/include/linux/intel_vsec.h @@ -4,12 +4,22 @@ #include <linux/auxiliary_bus.h> #include <linux/bits.h> +#include <linux/err.h> +#include <linux/intel_pmt_features.h> -#define VSEC_CAP_TELEMETRY BIT(0) -#define VSEC_CAP_WATCHER BIT(1) -#define VSEC_CAP_CRASHLOG BIT(2) -#define VSEC_CAP_SDSI BIT(3) -#define VSEC_CAP_TPMI BIT(4) +/* + * VSEC_CAP_UNUSED is reserved. 
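TPMI feature drivers keep calling tpmi_get_platform_data() and now consume the shared oobmsm_plat_info type. A minimal sketch, assuming the function returns NULL when no platform data is available; the probe function is illustrative:

static int example_tpmi_probe(struct auxiliary_device *auxdev)
{
	struct oobmsm_plat_info *info;

	info = tpmi_get_platform_data(auxdev);
	if (!info)
		return -ENODEV;

	dev_dbg(&auxdev->dev, "package %u partition %u bus %#x\n",
		info->package_id, info->partition, info->bus_number);

	return 0;
}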
It exists to prevent zero-initialized + * intel_vsec devices from being automatically set to a known + * capability with ID 0 + */ +#define VSEC_CAP_UNUSED BIT(0) +#define VSEC_CAP_TELEMETRY BIT(1) +#define VSEC_CAP_WATCHER BIT(2) +#define VSEC_CAP_CRASHLOG BIT(3) +#define VSEC_CAP_SDSI BIT(4) +#define VSEC_CAP_TPMI BIT(5) +#define VSEC_CAP_DISCOVERY BIT(6) +#define VSEC_FEATURE_COUNT 7 /* Intel DVSEC offsets */ #define INTEL_DVSEC_ENTRIES 0xA @@ -26,6 +36,7 @@ enum intel_vsec_id { VSEC_ID_TELEMETRY = 2, VSEC_ID_WATCHER = 3, VSEC_ID_CRASHLOG = 4, + VSEC_ID_DISCOVERY = 12, VSEC_ID_SDSI = 65, VSEC_ID_TPMI = 66, }; @@ -81,22 +92,31 @@ struct pmt_callbacks { int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count); }; +struct vsec_feature_dependency { + unsigned long feature; + unsigned long supplier_bitmap; +}; + /** * struct intel_vsec_platform_info - Platform specific data * @parent: parent device in the auxbus chain * @headers: list of headers to define the PMT client devices to create + * @deps: array of feature dependencies * @priv_data: private data, usable by parent devices, currently a callback * @caps: bitmask of PMT capabilities for the given headers * @quirks: bitmask of VSEC device quirks * @base_addr: allow a base address to be specified (rather than derived) + * @num_deps: Count of feature dependencies */ struct intel_vsec_platform_info { struct device *parent; struct intel_vsec_header **headers; + const struct vsec_feature_dependency *deps; void *priv_data; unsigned long caps; unsigned long quirks; u64 base_addr; + int num_deps; }; /** @@ -110,6 +130,7 @@ struct intel_vsec_platform_info { * @priv_data: any private data needed * @quirks: specified quirks * @base_addr: base address of entries (if specified) + * @cap_id: the enumerated id of the vsec feature */ struct intel_vsec_device { struct auxiliary_device auxdev; @@ -122,6 +143,44 @@ struct intel_vsec_device { size_t priv_data_size; unsigned long quirks; u64 base_addr; + unsigned long cap_id; +}; + +/** + * struct oobmsm_plat_info - Platform information for a device instance + * @cdie_mask: Mask of all compute dies in the partition + * @package_id: CPU Package id + * @partition: Package partition id when multiple VSEC PCI devices per package + * @segment: PCI segment ID + * @bus_number: PCI bus number + * @device_number: PCI device number + * @function_number: PCI function number + * + * Structure to store platform data for an OOBMSM device instance.
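A usage sketch for the feature-group accessors introduced just below: look up all telemetry regions for one feature, walk them, and drop the reference when done. This assumes the getter returns an ERR_PTR on failure, as its !CONFIG_INTEL_PMT_TELEMETRY stub does:

static int example_walk_rmid_energy(void)
{
	struct pmt_feature_group *fg;
	int i;

	fg = intel_pmt_get_regions_by_feature(FEATURE_PER_RMID_ENERGY_TELEM);
	if (IS_ERR(fg))
		return PTR_ERR(fg);

	for (i = 0; i < fg->count; i++) {
		const struct telemetry_region *tr = &fg->regions[i];

		pr_info("guid %#x: package %u, %u RMIDs\n",
			tr->guid, tr->plat_info.package_id, tr->num_rmids);
	}

	/* Drops the kref acquired by the lookup. */
	intel_pmt_put_feature_group(fg);
	return 0;
}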
+ */ +struct oobmsm_plat_info { + u16 cdie_mask; + u8 package_id; + u8 partition; + u8 segment; + u8 bus_number; + u8 device_number; + u8 function_number; +}; + +struct telemetry_region { + struct oobmsm_plat_info plat_info; + void __iomem *addr; + size_t size; + u32 guid; + u32 num_rmids; +}; + +struct pmt_feature_group { + enum pmt_feature_id id; + int count; + struct kref kref; + struct telemetry_region regions[]; }; int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, @@ -141,11 +200,40 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device #if IS_ENABLED(CONFIG_INTEL_VSEC) int intel_vsec_register(struct pci_dev *pdev, struct intel_vsec_platform_info *info); +int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info, + struct intel_vsec_device *vsec_dev); +struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev); #else static inline int intel_vsec_register(struct pci_dev *pdev, struct intel_vsec_platform_info *info) { return -ENODEV; } +static inline int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info, + struct intel_vsec_device *vsec_dev) +{ + return -ENODEV; +} +static inline struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#if IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY) +struct pmt_feature_group * +intel_pmt_get_regions_by_feature(enum pmt_feature_id id); + +void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group); +#else +static inline struct pmt_feature_group * +intel_pmt_get_regions_by_feature(enum pmt_feature_id id) +{ + return ERR_PTR(-ENODEV); +} + +static inline void +intel_pmt_put_feature_group(struct pmt_feature_group *feature_group) {} #endif + #endif diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index 53408124c1e5..cfa6d0c0c322 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -8,6 +8,8 @@ /* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */ #define IORING_URING_CMD_CANCELABLE (1U << 30) +/* io_uring_cmd is being issued again */ +#define IORING_URING_CMD_REISSUE (1U << 31) struct io_uring_cmd { struct file *file; @@ -19,10 +21,6 @@ struct io_uring_cmd { u8 pdu[32]; /* available inline for free use */ }; -struct io_uring_cmd_data { - void *op_data; -}; - static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) { return sqe->cmd; @@ -135,11 +133,6 @@ static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd return cmd_to_io_kiocb(cmd)->tctx->task; } -static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd) -{ - return cmd_to_io_kiocb(cmd)->async_data; -} - /* * Return uring_cmd's context reference as its context handle for driver to * track per-context resource, such as registered kernel IO buffer diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index a7efcec2e3d0..80a178f3d896 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -26,6 +26,8 @@ enum io_uring_cmd_flags { IO_URING_F_MULTISHOT = 4, /* executed by io-wq */ IO_URING_F_IOWQ = 8, + /* executed inline from syscall */ + IO_URING_F_INLINE = 16, /* int's last bit, sign checks are usually faster than a bit test */ IO_URING_F_NONBLOCK = INT_MIN, @@ -502,6 +504,7 @@ enum { REQ_F_BUF_NODE_BIT, REQ_F_HAS_METADATA_BIT, REQ_F_IMPORT_BUFFER_BIT, + REQ_F_SQE_COPIED_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -591,6 +594,8 @@ 
enum { * For SEND_ZC, whether to import buffers (i.e. the first issue). */ REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT), + /* ->sqe_copy() has been called, if necessary */ + REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, io_tw_token_t tw); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 522644d62f30..73dceabc21c8 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -101,8 +101,6 @@ struct vm_fault; */ #define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ -struct iomap_folio_ops; - struct iomap { u64 addr; /* disk offset of mapping, bytes */ loff_t offset; /* file offset of mapping, bytes */ @@ -113,7 +111,6 @@ struct iomap { struct dax_device *dax_dev; /* dax_dev for dax operations */ void *inline_data; void *private; /* filesystem private */ - const struct iomap_folio_ops *folio_ops; u64 validity_cookie; /* used with .iomap_valid() */ }; @@ -143,16 +140,11 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap) } /* - * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio - * and put_folio will be called for each folio written to. This only applies - * to buffered writes as unbuffered writes will not typically have folios - * associated with them. - * * When get_folio succeeds, put_folio will always be called to do any * cleanup work necessary. put_folio is responsible for unlocking and putting * @folio. */ -struct iomap_folio_ops { +struct iomap_write_ops { struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos, unsigned len); void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied, @@ -174,6 +166,16 @@ struct iomap_folio_ops { * locked by the iomap code. */ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap); + + /* + * Optional if the filesystem wishes to provide a custom handler for + * reading in the contents of a folio, otherwise iomap will default to + * submitting a bio read request. + * + * The read must be done synchronously. 
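With the write-path callbacks moved out of struct iomap, a filesystem passes them explicitly to the buffered-write entry point. A hedged wiring sketch; every example_* callback and the iomap_ops instance are hypothetical, and only the callbacks a filesystem actually needs have to be non-NULL:

static const struct iomap_write_ops example_write_ops = {
	.get_folio	  = example_get_folio,		/* hypothetical */
	.put_folio	  = example_put_folio,		/* hypothetical */
	.iomap_valid	  = example_iomap_valid,	/* hypothetical */
	.read_folio_range = example_read_folio_range,	/* hypothetical */
};

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &example_iomap_ops,
					 &example_write_ops, NULL);
}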
+ */ + int (*read_folio_range)(const struct iomap_iter *iter, + struct folio *folio, loff_t pos, size_t len); }; /* @@ -335,7 +337,8 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter) } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, - const struct iomap_ops *ops, void *private); + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops, void *private); int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); @@ -344,11 +347,14 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio); int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, - const struct iomap_ops *ops); + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, - bool *did_zero, const struct iomap_ops *ops, void *private); + bool *did_zero, const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops, void *private); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, - const struct iomap_ops *ops, void *private); + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops, void *private); vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops, void *private); typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length, @@ -391,8 +397,7 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno, /* * Structure for writeback I/O completions. * - * File systems implementing ->submit_ioend (for buffered I/O) or ->submit_io - * for direct I/O) can split a bio generated by iomap. In that case the parent + * File systems can split a bio generated by iomap. In that case the parent * ioend it was split from is recorded in ioend->io_parent. */ struct iomap_ioend { @@ -416,41 +421,38 @@ static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio) struct iomap_writeback_ops { /* - * Required, maps the blocks so that writeback can be performed on - * the range starting at offset. + * Performs writeback on the passed in range * - * Can return arbitrarily large regions, but we need to call into it at + * Can map arbitrarily large regions, but we need to call into it at * least once per folio to allow the file systems to synchronize with * the write path that could be invalidating mappings. * * An existing mapping from a previous call to this method can be reused * by the file system if it is still valid. - */ - int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode, - loff_t offset, unsigned len); - - /* - * Optional, allows the file systems to hook into bio submission, - * including overriding the bi_end_io handler. * - * Returns 0 if the bio was successfully submitted, or a negative - * error code if status was non-zero or another error happened and - * the bio could not be submitted. + * Returns the number of bytes processed or a negative errno. 
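Under the reworked interface the caller builds the whole writeback context up front. A filesystem content with ioend-based I/O can pair its own ->writeback_range with the generic submit helper declared further below; the ops instance and range callback here are hypothetical:

static const struct iomap_writeback_ops example_writeback_ops = {
	.writeback_range  = example_writeback_range,	/* hypothetical */
	.writeback_submit = iomap_ioend_writeback_submit,
};

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &example_writeback_ops,
	};

	return iomap_writepages(&wpc);
}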
*/ - int (*submit_ioend)(struct iomap_writepage_ctx *wpc, int status); + ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc, + struct folio *folio, u64 pos, unsigned int len, + u64 end_pos); /* - * Optional, allows the file system to discard state on a page where - * we failed to submit any I/O. + * Submit a writeback context previously built up by ->writeback_range. + * + * Returns 0 if the context was successfully submitted, or a negative + * error code if not. If @error is non-zero a failure occurred, and + * the writeback context should be completed with an error. */ - void (*discard_folio)(struct folio *folio, loff_t pos); + int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error); }; struct iomap_writepage_ctx { struct iomap iomap; - struct iomap_ioend *ioend; + struct inode *inode; + struct writeback_control *wbc; const struct iomap_writeback_ops *ops; u32 nr_folios; /* folios added to the ioend */ + void *wb_ctx; /* pending writeback context */ }; struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio, @@ -461,9 +463,17 @@ void iomap_finish_ioends(struct iomap_ioend *ioend, int error); void iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends); void iomap_sort_ioends(struct list_head *ioend_list); -int iomap_writepages(struct address_space *mapping, - struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, - const struct iomap_writeback_ops *ops); +ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio, + loff_t pos, loff_t end_pos, unsigned int dirty_len); +int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error); + +void iomap_start_folio_write(struct inode *inode, struct folio *folio, + size_t len); +void iomap_finish_folio_write(struct inode *inode, struct folio *folio, + size_t len); + +int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio); +int iomap_writepages(struct iomap_writepage_ctx *wpc); /* * Flags for direct I/O ->end_io: diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 156732807994..7073be1d8841 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -604,7 +604,6 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, * It is required to call iommufd_viommu_alloc() helper for * a bundled allocation of the core and the driver structures, * using the given @ictx pointer. - * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops * @identity_domain: An always available, always attachable identity * translation.
@@ -659,7 +658,6 @@ struct iommu_ops { struct iommufd_ctx *ictx, unsigned int viommu_type); const struct iommu_domain_ops *default_domain_ops; - unsigned long pgsize_bitmap; struct module *owner; struct iommu_domain *identity_domain; struct iommu_domain *blocked_domain; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 5aeeed22f35b..bc6ec2959173 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -17,6 +17,7 @@ struct ipv6_devconf { __s32 hop_limit; __s32 mtu6; __s32 forwarding; + __s32 force_forwarding; __s32 disable_policy; __s32 proxy_ndp; __cacheline_group_end(ipv6_devconf_read_txrx); @@ -156,6 +157,7 @@ struct inet6_skb_parm { #define IP6SKB_SEG6 256 #define IP6SKB_FAKEJUMBO 512 #define IP6SKB_MULTIPATH 1024 +#define IP6SKB_MCROUTE 2048 }; #if defined(CONFIG_NET_L3_MASTER_DEV) diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h new file mode 100644 index 000000000000..4fb2f93d82ed --- /dev/null +++ b/include/linux/irq-entry-common.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_IRQENTRYCOMMON_H +#define __LINUX_IRQENTRYCOMMON_H + +#include <linux/static_call_types.h> +#include <linux/syscalls.h> +#include <linux/context_tracking.h> +#include <linux/tick.h> +#include <linux/kmsan.h> + +#include <asm/entry-common.h> + +/* + * Define dummy _TIF work flags if not defined by the architecture or for + * disabled functionality. + */ +#ifndef _TIF_PATCH_PENDING +# define _TIF_PATCH_PENDING (0) +#endif + +/* + * TIF flags handled in exit_to_user_mode_loop() + */ +#ifndef ARCH_EXIT_TO_USER_MODE_WORK +# define ARCH_EXIT_TO_USER_MODE_WORK (0) +#endif + +#define EXIT_TO_USER_MODE_WORK \ + (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ + ARCH_EXIT_TO_USER_MODE_WORK) + +/** + * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs + * @regs: Pointer to current's pt_regs + * + * Defaults to an empty implementation. Can be replaced by architecture + * specific code. + * + * Invoked from syscall_enter_from_user_mode() in the non-instrumentable + * section. Use __always_inline so the compiler cannot push it out of line + * and make it instrumentable. + */ +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs); + +#ifndef arch_enter_from_user_mode +static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {} +#endif + +/** + * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent + * states. + * + * Returns: true if the CPU is potentially in an RCU EQS, false otherwise. + * + * Architectures only need to define this if threads other than the idle thread + * may have an interruptible EQS. This does not need to handle idle threads. It + * is safe to over-estimate at the cost of redundant RCU management work. + * + * Invoked from irqentry_enter() + */ +#ifndef arch_in_rcu_eqs +static __always_inline bool arch_in_rcu_eqs(void) { return false; } +#endif + +/** + * enter_from_user_mode - Establish state when coming from user mode + * + * Syscall/interrupt entry disables interrupts, but user mode is traced as + * interrupts enabled. Also with NO_HZ_FULL RCU might be idle. + * + * 1) Tell lockdep that interrupts are disabled + * 2) Invoke context tracking if enabled to reactivate RCU + * 3) Trace interrupts off state + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled.
The calling code has to be non-instrumentable. When the + * function returns all state is correct and interrupts are still + * disabled. The subsequent functions can be instrumented. + * + * This is invoked when there is architecture specific functionality to be + * done between establishing state and enabling interrupts. The caller must + * enable interrupts before invoking syscall_enter_from_user_mode_work(). + */ +static __always_inline void enter_from_user_mode(struct pt_regs *regs) +{ + arch_enter_from_user_mode(regs); + lockdep_hardirqs_off(CALLER_ADDR0); + + CT_WARN_ON(__ct_state() != CT_STATE_USER); + user_exit_irqoff(); + + instrumentation_begin(); + kmsan_unpoison_entry_regs(regs); + trace_hardirqs_off_finish(); + instrumentation_end(); +} + +/** + * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable() + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Defaults to local_irq_enable(). Can be supplied by architecture specific + * code. + */ +static inline void local_irq_enable_exit_to_user(unsigned long ti_work); + +#ifndef local_irq_enable_exit_to_user +static inline void local_irq_enable_exit_to_user(unsigned long ti_work) +{ + local_irq_enable(); +} +#endif + +/** + * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable() + * + * Defaults to local_irq_disable(). Can be supplied by architecture specific + * code. + */ +static inline void local_irq_disable_exit_to_user(void); + +#ifndef local_irq_disable_exit_to_user +static inline void local_irq_disable_exit_to_user(void) +{ + local_irq_disable(); +} +#endif + +/** + * arch_exit_to_user_mode_work - Architecture specific TIF work for exit + * to user mode. + * @regs: Pointer to current's pt_regs + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Invoked from exit_to_user_mode_loop() with interrupts enabled + * + * Defaults to NOOP. Can be supplied by architecture specific code. + */ +static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work); + +#ifndef arch_exit_to_user_mode_work +static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work) +{ +} +#endif + +/** + * arch_exit_to_user_mode_prepare - Architecture specific preparation for + * exit to user mode. + * @regs: Pointer to current's pt_regs + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last + * function before return. Defaults to NOOP. + */ +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work); + +#ifndef arch_exit_to_user_mode_prepare +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ +} +#endif + +/** + * arch_exit_to_user_mode - Architecture specific final work before + * exit to user mode. + * + * Invoked from exit_to_user_mode() with interrupts disabled as the last + * function before return. Defaults to NOOP. + * + * This needs to be __always_inline because it is non-instrumentable code + * invoked after context tracking switched to user mode. + * + * An architecture implementation must not do anything complex, no locking + * etc. The main purpose is for speculation mitigations.
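Given the #ifndef pattern used throughout this header, an architecture overrides a hook by providing both the function and a same-named macro in its asm/entry-common.h. A hedged sketch of such an override; the barrier helper is illustrative:

/* In a hypothetical architecture's asm/entry-common.h: */
static __always_inline void arch_exit_to_user_mode(void)
{
	/* Non-instrumentable last-moment work, e.g. a speculation barrier. */
	example_speculation_barrier();	/* hypothetical helper */
}
#define arch_exit_to_user_mode arch_exit_to_user_mode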
+ */ +static __always_inline void arch_exit_to_user_mode(void); + +#ifndef arch_exit_to_user_mode +static __always_inline void arch_exit_to_user_mode(void) { } +#endif + +/** + * arch_do_signal_or_restart - Architecture specific signal delivery function + * @regs: Pointer to current's pt_regs + * + * Invoked from exit_to_user_mode_loop(). + */ +void arch_do_signal_or_restart(struct pt_regs *regs); + +/** + * exit_to_user_mode_loop - do any pending work before leaving to user space + */ +unsigned long exit_to_user_mode_loop(struct pt_regs *regs, + unsigned long ti_work); + +/** + * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required + * @regs: Pointer to pt_regs on entry stack + * + * 1) check that interrupts are disabled + * 2) call tick_nohz_user_enter_prepare() + * 3) call exit_to_user_mode_loop() if any flags from + * EXIT_TO_USER_MODE_WORK are set + * 4) check that interrupts are still disabled + */ +static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) +{ + unsigned long ti_work; + + lockdep_assert_irqs_disabled(); + + /* Flush pending rcuog wakeup before the last need_resched() check */ + tick_nohz_user_enter_prepare(); + + ti_work = read_thread_flags(); + if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) + ti_work = exit_to_user_mode_loop(regs, ti_work); + + arch_exit_to_user_mode_prepare(regs, ti_work); + + /* Ensure that kernel state is sane for a return to userspace */ + kmap_assert_nomap(); + lockdep_assert_irqs_disabled(); + lockdep_sys_exit(); +} + +/** + * exit_to_user_mode - Fixup state when exiting to user mode + * + * Syscall/interrupt exit enables interrupts, but the kernel state is + * interrupts disabled when this is invoked. Also tell RCU about it. + * + * 1) Trace interrupts on state + * 2) Invoke context tracking if enabled to adjust RCU state + * 3) Invoke architecture specific last minute exit code, e.g. speculation + * mitigations, etc.: arch_exit_to_user_mode() + * 4) Tell lockdep that interrupts are enabled + * + * Invoked from architecture specific code when syscall_exit_to_user_mode() + * is not suitable as the last step before returning to userspace. Must be + * invoked with interrupts disabled and the caller must be + * non-instrumentable. + * The caller has to invoke syscall_exit_to_user_mode_work() before this. + */ +static __always_inline void exit_to_user_mode(void) +{ + instrumentation_begin(); + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(); + instrumentation_end(); + + user_enter_irqoff(); + arch_exit_to_user_mode(); + lockdep_hardirqs_on(CALLER_ADDR0); +} + +/** + * irqentry_enter_from_user_mode - Establish state before invoking the irq handler + * @regs: Pointer to current's pt_regs + * + * Invoked from architecture specific entry code with interrupts disabled. + * Can only be called when the interrupt entry came from user mode. The + * calling code must be non-instrumentable. When the function returns all + * state is correct and the subsequent functions can be instrumented. + * + * The function establishes state (lockdep, RCU (context tracking), tracing) + */ +void irqentry_enter_from_user_mode(struct pt_regs *regs); + +/** + * irqentry_exit_to_user_mode - Interrupt exit work + * @regs: Pointer to current's pt_regs + * + * Invoked with interrupts disabled and fully valid regs. Returns with all + * work handled, interrupts disabled such that the caller can immediately + * switch to user mode. Called from architecture specific interrupt + * handling code.
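A sketch of how these two calls pair up in architecture code (hypothetical handler; assumes the architecture dispatches through generic_handle_arch_irq()):

/* Hypothetical arch entry for an interrupt taken while in user mode. */
static void noinstr arch_irq_from_user(struct pt_regs *regs)
{
        irqentry_enter_from_user_mode(regs);

        instrumentation_begin();
        generic_handle_arch_irq(regs);  /* instrumentable handler body */
        instrumentation_end();

        irqentry_exit_to_user_mode(regs);
}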
+ * + * The call order is #2 and #3 as described in syscall_exit_to_user_mode(). + * Interrupt exit is not invoking #1 which is the syscall specific one time + * work. + */ +void irqentry_exit_to_user_mode(struct pt_regs *regs); + +#ifndef irqentry_state +/** + * struct irqentry_state - Opaque object for exception state storage + * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the + * exit path has to invoke ct_irq_exit(). + * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that + * lockdep state is restored correctly on exit from nmi. + * + * This opaque object is filled in by the irqentry_*_enter() functions and + * must be passed back into the corresponding irqentry_*_exit() functions + * when the exception is complete. + * + * Callers of irqentry_*_[enter|exit]() must consider this structure opaque + * and all members private. Descriptions of the members are provided to aid in + * the maintenance of the irqentry_*() functions. + */ +typedef struct irqentry_state { + union { + bool exit_rcu; + bool lockdep; + }; +} irqentry_state_t; +#endif + +/** + * irqentry_enter - Handle state tracking on ordinary interrupt entries + * @regs: Pointer to pt_regs of interrupted context + * + * Invokes: + * - lockdep irqflag state tracking as low level ASM entry disabled + * interrupts. + * + * - Context tracking if the exception hit user mode. + * + * - The hardirq tracer to keep the state consistent as low level ASM + * entry disabled interrupts. + * + * As a precondition, this requires that the entry came from user mode, + * idle, or a kernel context in which RCU is watching. + * + * For kernel mode entries RCU handling is done conditionally. If RCU is + * watching then the only RCU requirement is to check whether the tick has + * to be restarted. If RCU is not watching then ct_irq_enter() has to be + * invoked on entry and ct_irq_exit() on exit. + * + * Avoiding the ct_irq_enter/exit() calls is an optimization but also + * solves the problem of kernel mode pagefaults which can schedule, which + * is not possible after invoking ct_irq_enter() without undoing it. + * + * For user mode entries irqentry_enter_from_user_mode() is invoked to + * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit + * would not be possible. + * + * Returns: An opaque object that must be passed to irqentry_exit() + */ +irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); + +/** + * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt + * + * Conditional reschedule with additional sanity checks.
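In practice the enter/exit contract above reduces to a simple pattern; a minimal sketch (hypothetical exception handler):

/* Usable for entries from both user and kernel mode. */
static void noinstr arch_handle_exception(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);  /* opaque state object */

        instrumentation_begin();
        /* ... instrumentable exception handling ... */
        instrumentation_end();

        irqentry_exit(regs, state);     /* must get the object back from irqentry_enter() */
}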
+ */ +void raw_irqentry_exit_cond_resched(void); +#ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) +#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched +#define irqentry_exit_cond_resched_dynamic_disabled NULL +DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); +#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)() +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +void dynamic_irqentry_exit_cond_resched(void); +#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched() +#endif +#else /* CONFIG_PREEMPT_DYNAMIC */ +#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched() +#endif /* CONFIG_PREEMPT_DYNAMIC */ + +/** + * irqentry_exit - Handle return from exception that used irqentry_enter() + * @regs: Pointer to pt_regs (exception entry regs) + * @state: Return value from matching call to irqentry_enter() + * + * Depending on the return target (kernel/user) this runs the necessary + * preemption and work checks if possible and required and returns to + * the caller with interrupts disabled and no further work pending. + * + * This is the last action before returning to the low level ASM code which + * just needs to return to the appropriate context. + * + * Counterpart to irqentry_enter(). + */ +void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state); + +/** + * irqentry_nmi_enter - Handle NMI entry + * @regs: Pointer to current's pt_regs + * + * Similar to irqentry_enter() but taking care of the NMI constraints. + */ +irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs); + +/** + * irqentry_nmi_exit - Handle return from NMI handling + * @regs: Pointer to pt_regs (NMI entry regs) + * @irq_state: Return value from matching call to irqentry_nmi_enter() + * + * Last action before returning to the low level assembly code. + * + * Counterpart to irqentry_nmi_enter(). + */ +void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state); + +#endif diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 9bdb2a781841..ede1fa938152 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -10,6 +10,7 @@ #include <linux/list.h> +struct eventfd_ctx; struct irq_bypass_consumer; /* @@ -18,20 +19,22 @@ struct irq_bypass_consumer; * The IRQ bypass manager is a simple set of lists and callbacks that allows * IRQ producers (ex. physical interrupt sources) to be matched to IRQ * consumers (ex. virtualization hardware that allows IRQ bypass or offload) - * via a shared token (ex. eventfd_ctx). Producers and consumers register - * independently. When a token match is found, the optional @stop callback - * will be called for each participant. The pair will then be connected via - * the @add_* callbacks, and finally the optional @start callback will allow - * any final coordination. When either participant is unregistered, the - * process is repeated using the @del_* callbacks in place of the @add_* - * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings - * are not supported. + * via a shared eventfd_ctx. Producers and consumers register independently. + * When a producer and consumer are paired, i.e. an eventfd match is found, the + * optional @stop callback will be called for each participant.
The pair will + * then be connected via the @add_* callbacks, and finally the optional @start + * callback will allow any final coordination. When either participant is + * unregistered, the process is repeated using the @del_* callbacks in place of + * the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N + * pairings are not supported. */ +struct irq_bypass_consumer; + /** * struct irq_bypass_producer - IRQ bypass producer definition - * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer (non-NULL) + * @eventfd: eventfd context used to match producers and consumers + * @consumer: The connected consumer (NULL if no connection) * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) @@ -43,8 +46,8 @@ struct irq_bypass_consumer; * for a physical device assigned to a VM. */ struct irq_bypass_producer { - struct list_head node; - void *token; + struct eventfd_ctx *eventfd; + struct irq_bypass_consumer *consumer; int irq; int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); @@ -56,8 +59,8 @@ struct irq_bypass_producer { /** * struct irq_bypass_consumer - IRQ bypass consumer definition - * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer (non-NULL) + * @eventfd: eventfd context used to match producers and consumers + * @producer: The connected producer (NULL if no connection) * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) @@ -69,8 +72,9 @@ struct irq_bypass_producer { * portions of the interrupt handling to the VM. 
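A sketch of the reworked registration flow (error handling abbreviated; the producer/consumer objects and the shared eventfd_ctx are assumed to come from the two participants, e.g. a VFIO driver and KVM):

static int example_pair(struct irq_bypass_producer *prod,
                        struct irq_bypass_consumer *cons,
                        struct eventfd_ctx *eventfd, int irq)
{
        int ret;

        ret = irq_bypass_register_producer(prod, eventfd, irq);
        if (ret)
                return ret;

        /* Same eventfd: the manager runs @stop, the @add_* callbacks, then @start. */
        ret = irq_bypass_register_consumer(cons, eventfd);
        if (ret)
                irq_bypass_unregister_producer(prod);

        return ret;
}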
*/ struct irq_bypass_consumer { - struct list_head node; - void *token; + struct eventfd_ctx *eventfd; + struct irq_bypass_producer *producer; + int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*del_producer)(struct irq_bypass_consumer *, @@ -79,9 +83,11 @@ struct irq_bypass_consumer { void (*start)(struct irq_bypass_consumer *); }; -int irq_bypass_register_producer(struct irq_bypass_producer *); -void irq_bypass_unregister_producer(struct irq_bypass_producer *); -int irq_bypass_register_consumer(struct irq_bypass_consumer *); -void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); +int irq_bypass_register_producer(struct irq_bypass_producer *producer, + struct eventfd_ctx *eventfd, int irq); +void irq_bypass_unregister_producer(struct irq_bypass_producer *producer); +int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer, + struct eventfd_ctx *eventfd); +void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer); #endif /* IRQBYPASS_H */ diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 7f1f11a5e4e4..0b0887099fd7 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -146,7 +146,7 @@ int its_commit_vpe(struct its_vpe *vpe); int its_invall_vpe(struct its_vpe *vpe); int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map); -int its_unmap_vlpi(int irq); +void its_unmap_vlpi(int irq); int its_prop_update_vlpi(int irq, u8 config, bool inv); int its_prop_update_vsgi(int irq, u8 priority, bool group); diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h new file mode 100644 index 000000000000..68ddcdb1cec5 --- /dev/null +++ b/include/linux/irqchip/arm-gic-v5.h @@ -0,0 +1,394 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 ARM Limited, All Rights Reserved. 
+ */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H +#define __LINUX_IRQCHIP_ARM_GIC_V5_H + +#include <linux/iopoll.h> + +#include <asm/cacheflush.h> +#include <asm/smp.h> +#include <asm/sysreg.h> + +#define GICV5_IPIS_PER_CPU MAX_IPI + +/* + * INTID handling + */ +#define GICV5_HWIRQ_ID GENMASK(23, 0) +#define GICV5_HWIRQ_TYPE GENMASK(31, 29) +#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0) + +#define GICV5_HWIRQ_TYPE_PPI UL(0x1) +#define GICV5_HWIRQ_TYPE_LPI UL(0x2) +#define GICV5_HWIRQ_TYPE_SPI UL(0x3) + +/* + * Tables attributes + */ +#define GICV5_NO_READ_ALLOC 0b0 +#define GICV5_READ_ALLOC 0b1 +#define GICV5_NO_WRITE_ALLOC 0b0 +#define GICV5_WRITE_ALLOC 0b1 + +#define GICV5_NON_CACHE 0b00 +#define GICV5_WB_CACHE 0b01 +#define GICV5_WT_CACHE 0b10 + +#define GICV5_NON_SHARE 0b00 +#define GICV5_OUTER_SHARE 0b10 +#define GICV5_INNER_SHARE 0b11 + +/* + * IRS registers and tables structures + */ +#define GICV5_IRS_IDR1 0x0004 +#define GICV5_IRS_IDR2 0x0008 +#define GICV5_IRS_IDR5 0x0014 +#define GICV5_IRS_IDR6 0x0018 +#define GICV5_IRS_IDR7 0x001c +#define GICV5_IRS_CR0 0x0080 +#define GICV5_IRS_CR1 0x0084 +#define GICV5_IRS_SYNCR 0x00c0 +#define GICV5_IRS_SYNC_STATUSR 0x00c4 +#define GICV5_IRS_SPI_SELR 0x0108 +#define GICV5_IRS_SPI_CFGR 0x0114 +#define GICV5_IRS_SPI_STATUSR 0x0118 +#define GICV5_IRS_PE_SELR 0x0140 +#define GICV5_IRS_PE_STATUSR 0x0144 +#define GICV5_IRS_PE_CR0 0x0148 +#define GICV5_IRS_IST_BASER 0x0180 +#define GICV5_IRS_IST_CFGR 0x0190 +#define GICV5_IRS_IST_STATUSR 0x0194 +#define GICV5_IRS_MAP_L2_ISTR 0x01c0 + +#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20) +#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16) + +#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000 +#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001 +#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010 +#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011 +#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100 + +#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15) +#define GICV5_IRS_IDR2_ISTMD BIT(14) +#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11) +#define GICV5_IRS_IDR2_IST_LEVELS BIT(10) +#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6) +#define GICV5_IRS_IDR2_LPI BIT(5) +#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0) + +#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0) +#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0) +#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0) + +#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r)) +#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r)) +#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r)) + +#define GICV5_IRS_CR0_IDLE BIT(1) +#define GICV5_IRS_CR0_IRSEN BIT(0) + +#define GICV5_IRS_CR1_VPED_WA BIT(15) +#define GICV5_IRS_CR1_VPED_RA BIT(14) +#define GICV5_IRS_CR1_VMD_WA BIT(13) +#define GICV5_IRS_CR1_VMD_RA BIT(12) +#define GICV5_IRS_CR1_VPET_WA BIT(11) +#define GICV5_IRS_CR1_VPET_RA BIT(10) +#define GICV5_IRS_CR1_VMT_WA BIT(9) +#define GICV5_IRS_CR1_VMT_RA BIT(8) +#define GICV5_IRS_CR1_IST_WA BIT(7) +#define GICV5_IRS_CR1_IST_RA BIT(6) +#define GICV5_IRS_CR1_IC GENMASK(5, 4) +#define GICV5_IRS_CR1_OC GENMASK(3, 2) +#define GICV5_IRS_CR1_SH GENMASK(1, 0) + +#define GICV5_IRS_SYNCR_SYNC BIT(31) + +#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0) + +#define GICV5_IRS_SPI_STATUSR_V BIT(1) +#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0) + +#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0) + +#define GICV5_IRS_SPI_CFGR_TM BIT(0) + +#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0) + +#define GICV5_IRS_PE_STATUSR_V BIT(1) +#define GICV5_IRS_PE_STATUSR_IDLE BIT(0) + +#define 
GICV5_IRS_PE_CR0_DPS BIT(0) + +#define GICV5_IRS_IST_STATUSR_IDLE BIT(0) + +#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16) +#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7) +#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5) +#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0) + +#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0 +#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1 + +#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00 +#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01 +#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10 + +#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00 +#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01 +#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10 + +#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6) +#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0) + +#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0) + +#define GICV5_ISTL1E_VALID BIT_ULL(0) + +#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12) + +/* + * ITS registers and tables structures + */ +#define GICV5_ITS_IDR1 0x0004 +#define GICV5_ITS_IDR2 0x0008 +#define GICV5_ITS_CR0 0x0080 +#define GICV5_ITS_CR1 0x0084 +#define GICV5_ITS_DT_BASER 0x00c0 +#define GICV5_ITS_DT_CFGR 0x00d0 +#define GICV5_ITS_DIDR 0x0100 +#define GICV5_ITS_EIDR 0x0108 +#define GICV5_ITS_INV_EVENTR 0x010c +#define GICV5_ITS_INV_DEVICER 0x0110 +#define GICV5_ITS_STATUSR 0x0120 +#define GICV5_ITS_SYNCR 0x0140 +#define GICV5_ITS_SYNC_STATUSR 0x0148 + +#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8) +#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7) +#define GICV5_ITS_IDR1_DT_LEVELS BIT(6) +#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0) + +#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r)) +#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r)) +#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r)) + +#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5) +#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0) + +#define GICV5_ITS_CR0_IDLE BIT(1) +#define GICV5_ITS_CR0_ITSEN BIT(0) + +#define GICV5_ITS_CR1_ITT_RA BIT(7) +#define GICV5_ITS_CR1_DT_RA BIT(6) +#define GICV5_ITS_CR1_IC GENMASK(5, 4) +#define GICV5_ITS_CR1_OC GENMASK(3, 2) +#define GICV5_ITS_CR1_SH GENMASK(1, 0) + +#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16) +#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6) +#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0) + +#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3) + +#define GICV5_ITS_INV_DEVICER_I BIT(31) +#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1) +#define GICV5_ITS_INV_DEVICER_L1 BIT(0) + +#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0) + +#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0) + +#define GICV5_ITS_INV_EVENTR_I BIT(31) +#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1) +#define GICV5_ITS_INV_EVENTR_L1 BIT(0) + +#define GICV5_ITS_STATUSR_IDLE BIT(0) + +#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63) +#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32) +#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0) + +#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0) + +#define GICV5_DTL1E_VALID BIT_ULL(0) +/* Note that there is no shift for the address by design */ +#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3) +#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60) + +#define GICV5_DTL2E_VALID BIT_ULL(0) +#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1) +/* Note that there is no shift for the address by design */ +#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3) +#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57) +#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58) +#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59) + +#define GICV5_ITTL1E_VALID BIT_ULL(0) +/* Note that there is no 
shift for the address by design */ +#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3) +#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60) + +#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0) +#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28) +#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30) +#define GICV5_ITTL2E_VALID BIT_ULL(31) +#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32) + +#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00 +#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01 +#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10 + +#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0 +#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1 + +#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0) +#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32) + +/* + * IWB registers + */ +#define GICV5_IWB_IDR0 0x0000 +#define GICV5_IWB_CR0 0x0080 +#define GICV5_IWB_WENABLE_STATUSR 0x00c0 +#define GICV5_IWB_WENABLER 0x2000 +#define GICV5_IWB_WTMR 0x4000 + +#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11) +#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0) + +#define GICV5_IWB_CR0_IDLE BIT(1) +#define GICV5_IWB_CR0_IWBEN BIT(0) + +#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0) + +/* + * Global Data structures and functions + */ +struct gicv5_chip_data { + struct fwnode_handle *fwnode; + struct irq_domain *ppi_domain; + struct irq_domain *spi_domain; + struct irq_domain *lpi_domain; + struct irq_domain *ipi_domain; + u32 global_spi_count; + u8 cpuif_pri_bits; + u8 cpuif_id_bits; + u8 irs_pri_bits; + struct { + __le64 *l1ist_addr; + u32 l2_size; + u8 l2_bits; + bool l2; + } ist; +}; + +extern struct gicv5_chip_data gicv5_global_data __read_mostly; + +struct gicv5_irs_chip_data { + struct list_head entry; + struct fwnode_handle *fwnode; + void __iomem *irs_base; + u32 flags; + u32 spi_min; + u32 spi_range; + raw_spinlock_t spi_config_lock; +}; + +static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset, + const char *reg_s, u32 mask, + u32 *val) +{ + void __iomem *reg = addr + offset; + u32 tmp; + int ret; + + ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC); + if (unlikely(ret == -ETIMEDOUT)) { + pr_err_ratelimited("%s timeout...\n", reg_s); + return ret; + } + + if (val) + *val = tmp; + + return 0; +} + +static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset, + const char *reg_s, u32 mask) +{ + void __iomem *reg = addr + offset; + u32 val; + int ret; + + ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC); + if (unlikely(ret == -ETIMEDOUT)) { + pr_err_ratelimited("%s timeout...\n", reg_s); + return ret; + } + + return 0; +} + +#define gicv5_wait_for_op_atomic(base, reg, mask, val) \ + gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val) + +#define gicv5_wait_for_op(base, reg, mask) \ + gicv5_wait_for_op_s(base, reg, #reg, mask) + +void __init gicv5_init_lpi_domain(void); +void __init gicv5_free_lpi_domain(void); + +int gicv5_irs_of_probe(struct device_node *parent); +void gicv5_irs_remove(void); +int gicv5_irs_enable(void); +void gicv5_irs_its_probe(void); +int gicv5_irs_register_cpu(int cpuid); +int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid); +struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id); +int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type); +int gicv5_irs_iste_alloc(u32 lpi); +void gicv5_irs_syncr(void); + +struct gicv5_its_devtab_cfg { + union { + struct { + __le64 *devtab; + } linear; + struct { + __le64 *l1devtab; + __le64 **l2ptrs; + } l2; + }; + u32 cfgr; +}; + +struct gicv5_its_itt_cfg { + union { + struct { + __le64 *itt; + 
unsigned int num_ents; + } linear; + struct { + __le64 *l1itt; + __le64 **l2ptrs; + unsigned int num_l1_ents; + u8 l2sz; + } l2; + }; + u8 event_id_bits; + bool l2itt; +}; + +void gicv5_init_lpis(u32 max); +void gicv5_deinit_lpis(void); + +int gicv5_alloc_lpi(void); +void gicv5_free_lpi(u32 lpi); + +void __init gicv5_its_of_probe(struct device_node *parent); +#endif diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h index a75b2c7de69d..ca1713fac6e3 100644 --- a/include/linux/irqchip/arm-vgic-info.h +++ b/include/linux/irqchip/arm-vgic-info.h @@ -15,6 +15,8 @@ enum gic_type { GIC_V2, /* Full GICv3, optionally with v2 compat */ GIC_V3, + /* Full GICv5, optionally with v3 compat */ + GIC_V5, }; struct gic_kvm_info { @@ -34,6 +36,8 @@ struct gic_kvm_info { bool has_v4_1; /* Deactivation impared, subpar stuff */ bool no_hw_deactivation; + /* v3 compat support (GICv5 hosts, only) */ + bool has_gcie_v3_compat; }; #ifdef CONFIG_KVM diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 7387d183029b..4a86e6b915dd 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -212,6 +212,9 @@ enum { /* Address and data pair is mutable when irq_set_affinity() */ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11), + /* IRQ domain requires parent fwnode matching */ + IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -279,6 +282,7 @@ struct irq_domain_chip_generic_info; * domains are added using same fwnode * @ops: Domain operation callbacks * @host_data: Controller private data pointer + * @dev: Device which creates the domain * @dgc_info: Geneneric chip information structure pointer used to * create generic chips for the domain if not NULL. * @init: Function called when the domain is created. @@ -298,6 +302,7 @@ struct irq_domain_info { const char *name_suffix; const struct irq_domain_ops *ops; void *host_data; + struct device *dev; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /** * @parent: Pointer to the parent irq domain used in a hierarchy domain diff --git a/include/linux/ism.h b/include/linux/ism.h index 5428edd90982..8358b4cd7ba6 100644 --- a/include/linux/ism.h +++ b/include/linux/ism.h @@ -28,6 +28,7 @@ struct ism_dmb { struct ism_dev { spinlock_t lock; /* protects the ism device */ + spinlock_t cmd_lock; /* serializes cmds */ struct list_head list; struct pci_dev *pdev; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 1cce1f6410a9..989315dabb86 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -373,9 +373,9 @@ ftrace_vprintk(const char *fmt, va_list ap) static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #endif /* CONFIG_TRACING */ -/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ -#ifdef CONFIG_FTRACE_MCOUNT_RECORD -# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD +/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */ +#ifdef CONFIG_DYNAMIC_FTRACE +# define REBUILD_DUE_TO_DYNAMIC_FTRACE #endif /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 68f69362d427..9a07c3215389 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -14,10 +14,7 @@ #include <linux/workqueue.h> #include <linux/sysctl.h> -#define KMOD_PATH_LEN 256 - #ifdef CONFIG_MODULES -extern char modprobe_path[]; /* for sysctl */ /* modprobe exit status on success, -ve on error. Return value * usually useless though. 
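A brief usage sketch for the request_module() interface this header provides (the hunk above only drops KMOD_PATH_LEN and the modprobe_path declaration; the call itself is unchanged, and the module name is just an example):

#include <linux/kmod.h>

static void example_need_helper(void)
{
        /*
         * As the comment above notes, the return value is modprobe's exit
         * status and rarely meaningful; callers normally just retry the
         * lookup that needed the module.
         */
        if (request_module("crc-t10dif") < 0)
                pr_warn("crc-t10dif helper not available\n");
}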
*/ extern __printf(2, 3) diff --git a/include/linux/stackleak.h b/include/linux/kstack_erase.h index 3be2cb564710..bf3bf1905557 100644 --- a/include/linux/stackleak.h +++ b/include/linux/kstack_erase.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_STACKLEAK_H -#define _LINUX_STACKLEAK_H +#ifndef _LINUX_KSTACK_ERASE_H +#define _LINUX_KSTACK_ERASE_H #include <linux/sched.h> #include <linux/sched/task_stack.h> @@ -9,10 +9,10 @@ * Check that the poison value points to the unused hole in the * virtual memory map for your platform. */ -#define STACKLEAK_POISON -0xBEEF -#define STACKLEAK_SEARCH_DEPTH 128 +#define KSTACK_ERASE_POISON -0xBEEF +#define KSTACK_ERASE_SEARCH_DEPTH 128 -#ifdef CONFIG_GCC_PLUGIN_STACKLEAK +#ifdef CONFIG_KSTACK_ERASE #include <asm/stacktrace.h> #include <linux/linkage.h> @@ -50,7 +50,7 @@ stackleak_task_high_bound(const struct task_struct *tsk) static __always_inline unsigned long stackleak_find_top_of_poison(const unsigned long low, const unsigned long high) { - const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + const unsigned int depth = KSTACK_ERASE_SEARCH_DEPTH / sizeof(unsigned long); unsigned int poison_count = 0; unsigned long poison_high = high; unsigned long sp = high; @@ -58,7 +58,7 @@ stackleak_find_top_of_poison(const unsigned long low, const unsigned long high) while (sp > low && poison_count < depth) { sp -= sizeof(unsigned long); - if (*(unsigned long *)sp == STACKLEAK_POISON) { + if (*(unsigned long *)sp == KSTACK_ERASE_POISON) { poison_count++; } else { poison_count = 0; @@ -72,7 +72,7 @@ stackleak_find_top_of_poison(const unsigned long low, const unsigned long high) static inline void stackleak_task_init(struct task_struct *t) { t->lowest_stack = stackleak_task_low_bound(t); -# ifdef CONFIG_STACKLEAK_METRICS +# ifdef CONFIG_KSTACK_ERASE_METRICS t->prev_lowest_stack = t->lowest_stack; # endif } @@ -80,9 +80,9 @@ static inline void stackleak_task_init(struct task_struct *t) asmlinkage void noinstr stackleak_erase(void); asmlinkage void noinstr stackleak_erase_on_task_stack(void); asmlinkage void noinstr stackleak_erase_off_task_stack(void); -void __no_caller_saved_registers noinstr stackleak_track_stack(void); +void __no_caller_saved_registers noinstr __sanitizer_cov_stack_depth(void); -#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ +#else /* !CONFIG_KSTACK_ERASE */ static inline void stackleak_task_init(struct task_struct *t) { } #endif diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index da4d9b5f58f1..eb10d87adf7d 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -49,9 +49,10 @@ static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *r } static inline int kvm_dirty_ring_reset(struct kvm *kvm, - struct kvm_dirty_ring *ring) + struct kvm_dirty_ring *ring, + int *nr_entries_reset) { - return 0; + return -ENOENT; } static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, @@ -77,17 +78,8 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm); u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm); int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring, int index, u32 size); - -/* - * called with kvm->slots_lock held, returns the number of - * processed pages. 
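To make the API change below concrete, a hedged sketch of a caller under the new signature (the count of reset entries now comes back via an out parameter instead of the return value; kvm->slots_lock is assumed held, as the old comment stated):

/* Hypothetical caller of the new kvm_dirty_ring_reset() signature. */
static int example_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
        int nr_reset = 0;
        int ret;

        ret = kvm_dirty_ring_reset(kvm, ring, &nr_reset);
        if (ret)
                return ret;     /* e.g. -ENOENT from the stub when unsupported */

        return nr_reset;
}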
- */ -int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring); - -/* - * returns =0: successfully pushed - * <0: unable to push, need to wait - */ +int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring, + int *nr_entries_reset); void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset); bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3bde4fb5c6aa..15656b7fba6c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -190,6 +190,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 +#define KVM_PIT_IRQ_SOURCE_ID 2 extern struct mutex kvm_lock; extern struct list_head vm_list; @@ -1022,16 +1023,12 @@ void kvm_unlock_all_vcpus(struct kvm *kvm); void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); -#ifdef __KVM_HAVE_IOAPIC +#ifdef CONFIG_KVM_IOAPIC void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); -void kvm_arch_post_irq_routing_update(struct kvm *kvm); #else static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) { } -static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) -{ -} #endif #ifdef CONFIG_HAVE_KVM_IRQCHIP @@ -1693,24 +1690,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) return false; } #endif -#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE -void kvm_arch_start_assignment(struct kvm *kvm); -void kvm_arch_end_assignment(struct kvm *kvm); -bool kvm_arch_has_assigned_device(struct kvm *kvm); -#else -static inline void kvm_arch_start_assignment(struct kvm *kvm) -{ -} - -static inline void kvm_arch_end_assignment(struct kvm *kvm) -{ -} - -static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) -{ - return false; -} -#endif static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) { @@ -1788,8 +1767,6 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); -int kvm_request_irq_source_id(struct kvm *kvm); -void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* @@ -2406,6 +2383,8 @@ struct kvm_vcpu *kvm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) +struct kvm_kernel_irqfd; + bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); @@ -2413,10 +2392,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); -int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set); -bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, - struct kvm_kernel_irq_routing_entry *); +void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index 8ad43692e3bb..ef8c134ded8a 100644 --- a/include/linux/kvm_irqfd.h +++ 
b/include/linux/kvm_irqfd.h @@ -55,10 +55,13 @@ struct kvm_kernel_irqfd { /* Used for setup/shutdown */ struct eventfd_ctx *eventfd; struct list_head list; - poll_table pt; struct work_struct shutdown; struct irq_bypass_consumer consumer; struct irq_bypass_producer *producer; + + struct kvm_vcpu *irq_bypass_vcpu; + struct list_head vcpu_list; + void *irq_bypass_data; }; #endif /* __LINUX_KVM_IRQFD_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 1e5aec839041..912ace523880 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -144,7 +144,6 @@ enum { ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ - ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \ @@ -500,16 +499,28 @@ enum ata_completion_errors { }; /* - * Link power management policy: If you alter this, you also need to - * alter libata-sata.c (for the ascii descriptions) + * Link Power Management (LPM) policies. + * + * The default LPM policy to use for a device link is defined using these values + * with the CONFIG_SATA_MOBILE_LPM_POLICY config option and applied through the + * target_lpm_policy field of struct ata_port. + * + * If you alter this, you also need to alter the policy names used with the + * sysfs attribute link_power_management_policy defined in libata-sata.c. */ enum ata_lpm_policy { + /* Keep firmware settings */ ATA_LPM_UNKNOWN, + /* No power savings (maximum performance) */ ATA_LPM_MAX_POWER, + /* HIPM (Partial) */ ATA_LPM_MED_POWER, - ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */ - ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */ - ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */ + /* HIPM (Partial) and DIPM (Partial and Slumber) */ + ATA_LPM_MED_POWER_WITH_DIPM, + /* HIPM (Partial and DevSleep) and DIPM (Partial and Slumber) */ + ATA_LPM_MIN_POWER_WITH_PARTIAL, + /* HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber) */ + ATA_LPM_MIN_POWER, }; enum ata_lpm_hints { @@ -750,6 +761,9 @@ struct ata_device { u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ } ____cacheline_aligned; + /* General Purpose Log Directory log page */ + u8 gp_log_dir[ATA_SECT_SIZE] ____cacheline_aligned; + /* DEVSLP Timing Variables from Identify Device Data Log */ u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; @@ -930,6 +944,13 @@ struct ata_port { */ #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) +struct ata_reset_operations { + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; +}; + struct ata_port_operations { /* * Command execution @@ -956,14 +977,8 @@ struct ata_port_operations { void (*freeze)(struct ata_port *ap); void (*thaw)(struct ata_port *ap); - ata_prereset_fn_t prereset; - ata_reset_fn_t softreset; - ata_reset_fn_t hardreset; - ata_postreset_fn_t postreset; - ata_prereset_fn_t pmp_prereset; - ata_reset_fn_t pmp_softreset; - ata_reset_fn_t pmp_hardreset; - ata_postreset_fn_t pmp_postreset; + struct ata_reset_operations reset; + struct ata_reset_operations pmp_reset; void (*error_handler)(struct ata_port *ap); void (*lost_interrupt)(struct ata_port *ap); void (*post_internal_cmd)(struct ata_queued_cmd *qc); @@ -1204,7 +1219,7 @@ extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev, 
extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev, bool enable); extern struct ata_device *ata_dev_pair(struct ata_device *adev); -extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); +int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); @@ -1398,9 +1413,6 @@ extern void ata_eh_thaw_port(struct ata_port *ap); extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); -extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, - ata_reset_fn_t softreset, ata_reset_fn_t hardreset, - ata_postreset_fn_t postreset); extern void ata_std_error_handler(struct ata_port *ap); extern void ata_std_sched_eh(struct ata_port *ap); extern void ata_std_end_eh(struct ata_port *ap); @@ -2138,6 +2150,12 @@ static inline u8 ata_wait_idle(struct ata_port *ap) return status; } +#else /* CONFIG_ATA_SFF */ +static inline int sata_sff_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_ATA_SFF */ #endif /* __LINUX_LIBATA_H__ */ diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h index 16a2ee4f8310..2ba846419524 100644 --- a/include/linux/local_lock.h +++ b/include/linux/local_lock.h @@ -13,13 +13,13 @@ * local_lock - Acquire a per CPU local lock * @lock: The lock variable */ -#define local_lock(lock) __local_lock(lock) +#define local_lock(lock) __local_lock(this_cpu_ptr(lock)) /** * local_lock_irq - Acquire a per CPU local lock and disable interrupts * @lock: The lock variable */ -#define local_lock_irq(lock) __local_lock_irq(lock) +#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock)) /** * local_lock_irqsave - Acquire a per CPU local lock, save and disable @@ -28,19 +28,19 @@ * @flags: Storage for interrupt flags */ #define local_lock_irqsave(lock, flags) \ - __local_lock_irqsave(lock, flags) + __local_lock_irqsave(this_cpu_ptr(lock), flags) /** * local_unlock - Release a per CPU local lock * @lock: The lock variable */ -#define local_unlock(lock) __local_unlock(lock) +#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock)) /** * local_unlock_irq - Release a per CPU local lock and enable interrupts * @lock: The lock variable */ -#define local_unlock_irq(lock) __local_unlock_irq(lock) +#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock)) /** * local_unlock_irqrestore - Release a per CPU local lock and restore @@ -49,7 +49,7 @@ * @flags: Interrupt flags to restore */ #define local_unlock_irqrestore(lock, flags) \ - __local_unlock_irqrestore(lock, flags) + __local_unlock_irqrestore(this_cpu_ptr(lock), flags) /** * local_lock_init - Runtime initialize a lock instance @@ -64,7 +64,7 @@ * locking constrains it will _always_ fail to acquire the lock in NMI or * HARDIRQ context on PREEMPT_RT. */ -#define local_trylock(lock) __local_trylock(lock) +#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock)) /** * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable @@ -77,7 +77,7 @@ * HARDIRQ context on PREEMPT_RT. 
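A short usage sketch of the standard per-CPU pattern (unchanged by this patch, which only moves the this_cpu_ptr() lookup out of the __local_* internals and into these wrappers):

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct example_pcpu {
        local_lock_t lock;
        unsigned long count;
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void example_count(void)
{
        /* The wrapper resolves this CPU's instance via this_cpu_ptr(). */
        local_lock(&example_pcpu.lock);
        this_cpu_inc(example_pcpu.count);
        local_unlock(&example_pcpu.lock);
}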
*/ #define local_trylock_irqsave(lock, flags) \ - __local_trylock_irqsave(lock, flags) + __local_trylock_irqsave(this_cpu_ptr(lock), flags) DEFINE_GUARD(local_lock, local_lock_t __percpu*, local_lock(_T), @@ -91,10 +91,10 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu, unsigned long flags) #define local_lock_nested_bh(_lock) \ - __local_lock_nested_bh(_lock) + __local_lock_nested_bh(this_cpu_ptr(_lock)) #define local_unlock_nested_bh(_lock) \ - __local_unlock_nested_bh(_lock) + __local_unlock_nested_bh(this_cpu_ptr(_lock)) DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*, local_lock_nested_bh(_T), diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 8d5ac16a9b17..d80b5306a2c0 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -99,14 +99,14 @@ do { \ local_trylock_t *tl; \ local_lock_t *l; \ \ - l = (local_lock_t *)this_cpu_ptr(lock); \ + l = (local_lock_t *)(lock); \ tl = (local_trylock_t *)l; \ _Generic((lock), \ - __percpu local_trylock_t *: ({ \ + local_trylock_t *: ({ \ lockdep_assert(tl->acquired == 0); \ WRITE_ONCE(tl->acquired, 1); \ }), \ - __percpu local_lock_t *: (void)0); \ + local_lock_t *: (void)0); \ local_lock_acquire(l); \ } while (0) @@ -133,7 +133,7 @@ do { \ local_trylock_t *tl; \ \ preempt_disable(); \ - tl = this_cpu_ptr(lock); \ + tl = (lock); \ if (READ_ONCE(tl->acquired)) { \ preempt_enable(); \ tl = NULL; \ @@ -150,7 +150,7 @@ do { \ local_trylock_t *tl; \ \ local_irq_save(flags); \ - tl = this_cpu_ptr(lock); \ + tl = (lock); \ if (READ_ONCE(tl->acquired)) { \ local_irq_restore(flags); \ tl = NULL; \ @@ -167,15 +167,15 @@ do { \ local_trylock_t *tl; \ local_lock_t *l; \ \ - l = (local_lock_t *)this_cpu_ptr(lock); \ + l = (local_lock_t *)(lock); \ tl = (local_trylock_t *)l; \ local_lock_release(l); \ _Generic((lock), \ - __percpu local_trylock_t *: ({ \ + local_trylock_t *: ({ \ lockdep_assert(tl->acquired == 1); \ WRITE_ONCE(tl->acquired, 0); \ }), \ - __percpu local_lock_t *: (void)0); \ + local_lock_t *: (void)0); \ } while (0) #define __local_unlock(lock) \ @@ -199,11 +199,11 @@ do { \ #define __local_lock_nested_bh(lock) \ do { \ lockdep_assert_in_softirq(); \ - local_lock_acquire(this_cpu_ptr(lock)); \ + local_lock_acquire((lock)); \ } while (0) #define __local_unlock_nested_bh(lock) \ - local_lock_release(this_cpu_ptr(lock)) + local_lock_release((lock)) #else /* !CONFIG_PREEMPT_RT */ @@ -227,7 +227,7 @@ typedef spinlock_t local_trylock_t; #define __local_lock(__lock) \ do { \ migrate_disable(); \ - spin_lock(this_cpu_ptr((__lock))); \ + spin_lock((__lock)); \ } while (0) #define __local_lock_irq(lock) __local_lock(lock) @@ -241,7 +241,7 @@ typedef spinlock_t local_trylock_t; #define __local_unlock(__lock) \ do { \ - spin_unlock(this_cpu_ptr((__lock))); \ + spin_unlock((__lock)); \ migrate_enable(); \ } while (0) @@ -252,12 +252,12 @@ typedef spinlock_t local_trylock_t; #define __local_lock_nested_bh(lock) \ do { \ lockdep_assert_in_softirq_func(); \ - spin_lock(this_cpu_ptr(lock)); \ + spin_lock((lock)); \ } while (0) #define __local_unlock_nested_bh(lock) \ do { \ - spin_unlock(this_cpu_ptr((lock))); \ + spin_unlock((lock)); \ } while (0) #define __local_trylock(lock) \ @@ -268,7 +268,7 @@ do { \ __locked = 0; \ } else { \ migrate_disable(); \ - __locked = spin_trylock(this_cpu_ptr((lock))); \ + __locked = spin_trylock((lock)); \ if (!__locked) \ migrate_enable(); \ } \ diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 
9f361d3ab9d9..eae115a26488 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -175,7 +175,7 @@ struct lock_class_stats { unsigned long bounces[nr_bounce_types]; }; -struct lock_class_stats lock_stats(struct lock_class *class); +void lock_stats(struct lock_class *class, struct lock_class_stats *stats); void clear_lock_stats(struct lock_class *class); #endif diff --git a/include/linux/log2.h b/include/linux/log2.h index 1366cb688a6d..2eac3fc9303d 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -255,4 +255,18 @@ int __bits_per(unsigned long n) ) : \ __bits_per(n) \ ) + +/** + * max_pow_of_two_factor - return highest power-of-2 factor + * @n: parameter + * + * Find the highest power-of-2 which evenly divides n. + * 0 is returned for n == 0 and 1 is returned for n == 1. + */ +static inline __attribute__((const)) +unsigned int max_pow_of_two_factor(unsigned int n) +{ + return n & -n; +} + #endif /* _LINUX_LOG2_H */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index bf3bbac4e02a..fd11fffdd3c3 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -157,6 +157,8 @@ LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name) LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry, const char *name) +LSM_HOOK(int, 0, inode_file_setattr, struct dentry *dentry, struct file_kattr *fa) +LSM_HOOK(int, 0, inode_file_getattr, struct dentry *dentry, struct file_kattr *fa) LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry, diff --git a/include/linux/mc33xs2410.h b/include/linux/mc33xs2410.h new file mode 100644 index 000000000000..31c0edf10dd7 --- /dev/null +++ b/include/linux/mc33xs2410.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH + */ +#ifndef _MC33XS2410_H +#define _MC33XS2410_H + +#include <linux/spi/spi.h> + +MODULE_IMPORT_NS("PWM_MC33XS2410"); + +int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val); +int mc33xs2410_read_reg_diag(struct spi_device *spi, u8 reg, u16 *val); +int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val); + +#endif /* _MC33XS2410_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index e43ff9f980a4..c640ba44dd6e 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -95,7 +95,6 @@ void mdio_device_remove(struct mdio_device *mdiodev); void mdio_device_reset(struct mdio_device *mdiodev, int value); int mdio_driver_register(struct mdio_driver *drv); void mdio_driver_unregister(struct mdio_driver *drv); -int mdio_device_bus_match(struct device *dev, const struct device_driver *drv); static inline void mdio_device_get(struct mdio_device *mdiodev) { diff --git a/include/linux/memblock.h b/include/linux/memblock.h index bb19a2534224..b96746376e17 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -463,7 +463,7 @@ static inline void *memblock_alloc_raw(phys_addr_t size, NUMA_NO_NODE); } -static inline void *memblock_alloc_from(phys_addr_t size, +static __always_inline void *memblock_alloc_from(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) { diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h index 016033cd68e4..5237da6b4a9f 100644 --- a/include/linux/mfd/adp5585.h +++ b/include/linux/mfd/adp5585.h @@ -10,13 +10,20
@@ #define __MFD_ADP5585_H_ #include <linux/bits.h> +#include <linux/notifier.h> #define ADP5585_ID 0x00 #define ADP5585_MAN_ID_VALUE 0x20 #define ADP5585_MAN_ID_MASK GENMASK(7, 4) +#define ADP5585_REV_ID_MASK GENMASK(3, 0) #define ADP5585_INT_STATUS 0x01 +#define ADP5585_OVRFLOW_INT BIT(2) +#define ADP5585_EVENT_INT BIT(0) #define ADP5585_STATUS 0x02 +#define ADP5585_EC_MASK GENMASK(4, 0) #define ADP5585_FIFO_1 0x03 +#define ADP5585_KEV_EV_PRESS_MASK BIT(7) +#define ADP5585_KEY_EVENT_MASK GENMASK(6, 0) #define ADP5585_FIFO_2 0x04 #define ADP5585_FIFO_3 0x05 #define ADP5585_FIFO_4 0x06 @@ -32,6 +39,7 @@ #define ADP5585_FIFO_14 0x10 #define ADP5585_FIFO_15 0x11 #define ADP5585_FIFO_16 0x12 +#define ADP5585_EV_MAX (ADP5585_FIFO_16 - ADP5585_FIFO_1 + 1) #define ADP5585_GPI_INT_STAT_A 0x13 #define ADP5585_GPI_INT_STAT_B 0x14 #define ADP5585_GPI_STATUS_A 0x15 @@ -60,6 +68,7 @@ #define ADP5585_GPIO_DIRECTION_A 0x27 #define ADP5585_GPIO_DIRECTION_B 0x28 #define ADP5585_RESET1_EVENT_A 0x29 +#define ADP5585_RESET_EV_PRESS BIT(7) #define ADP5585_RESET1_EVENT_B 0x2a #define ADP5585_RESET1_EVENT_C 0x2b #define ADP5585_RESET2_EVENT_A 0x2c @@ -104,23 +113,114 @@ #define ADP5585_INT_CFG BIT(1) #define ADP5585_RST_CFG BIT(0) #define ADP5585_INT_EN 0x3c +#define ADP5585_OVRFLOW_IEN BIT(2) +#define ADP5585_EVENT_IEN BIT(0) #define ADP5585_MAX_REG ADP5585_INT_EN -/* - * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the - * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to - * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver - * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is - * marked as reserved in the device tree for variants that don't support it. - */ -#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0) -#define ADP5585_BIT(n) ((n) >= 6 ? 
BIT((n) - 6) : BIT(n)) +#define ADP5585_PIN_MAX 11 +#define ADP5585_MAX_UNLOCK_TIME_SEC 7 +#define ADP5585_KEY_EVENT_START 1 +#define ADP5585_KEY_EVENT_END 25 +#define ADP5585_GPI_EVENT_START 37 +#define ADP5585_GPI_EVENT_END 47 +#define ADP5585_ROW5_KEY_EVENT_START 1 +#define ADP5585_ROW5_KEY_EVENT_END 30 +#define ADP5585_PWM_OUT 3 +#define ADP5585_RESET1_OUT 4 +#define ADP5585_RESET2_OUT 9 +#define ADP5585_ROW5 5 + +/* ADP5589 */ +#define ADP5589_MAN_ID_VALUE 0x10 +#define ADP5589_GPI_STATUS_A 0x16 +#define ADP5589_GPI_STATUS_C 0x18 +#define ADP5589_RPULL_CONFIG_A 0x19 +#define ADP5589_GPI_INT_LEVEL_A 0x1e +#define ADP5589_GPI_EVENT_EN_A 0x21 +#define ADP5589_DEBOUNCE_DIS_A 0x27 +#define ADP5589_GPO_DATA_OUT_A 0x2a +#define ADP5589_GPO_OUT_MODE_A 0x2d +#define ADP5589_GPIO_DIRECTION_A 0x30 +#define ADP5589_UNLOCK1 0x33 +#define ADP5589_UNLOCK_EV_PRESS BIT(7) +#define ADP5589_UNLOCK_TIMERS 0x36 +#define ADP5589_UNLOCK_TIMER GENMASK(2, 0) +#define ADP5589_LOCK_CFG 0x37 +#define ADP5589_LOCK_EN BIT(0) +#define ADP5589_RESET1_EVENT_A 0x38 +#define ADP5589_RESET2_EVENT_A 0x3B +#define ADP5589_RESET_CFG 0x3D +#define ADP5585_RESET2_POL BIT(7) +#define ADP5585_RESET1_POL BIT(6) +#define ADP5585_RST_PASSTHRU_EN BIT(5) +#define ADP5585_RESET_TRIG_TIME GENMASK(4, 2) +#define ADP5585_PULSE_WIDTH GENMASK(1, 0) +#define ADP5589_PWM_OFFT_LOW 0x3e +#define ADP5589_PWM_ONT_LOW 0x40 +#define ADP5589_PWM_CFG 0x42 +#define ADP5589_POLL_PTIME_CFG 0x48 +#define ADP5589_PIN_CONFIG_A 0x49 +#define ADP5589_PIN_CONFIG_D 0x4C +#define ADP5589_GENERAL_CFG 0x4d +#define ADP5589_INT_EN 0x4e +#define ADP5589_MAX_REG ADP5589_INT_EN + +#define ADP5589_PIN_MAX 19 +#define ADP5589_KEY_EVENT_START 1 +#define ADP5589_KEY_EVENT_END 88 +#define ADP5589_GPI_EVENT_START 97 +#define ADP5589_GPI_EVENT_END 115 +#define ADP5589_UNLOCK_WILDCARD 127 +#define ADP5589_RESET2_OUT 12 struct regmap; +enum adp5585_variant { + ADP5585_00 = 1, + ADP5585_01, + ADP5585_02, + ADP5585_03, + ADP5585_04, + ADP5589_00, + ADP5589_01, + ADP5589_02, + ADP5585_MAX +}; + +struct adp5585_regs { + unsigned int gen_cfg; + unsigned int ext_cfg; + unsigned int int_en; + unsigned int poll_ptime_cfg; + unsigned int reset_cfg; + unsigned int reset1_event_a; + unsigned int reset2_event_a; + unsigned int pin_cfg_a; +}; + struct adp5585_dev { + struct device *dev; struct regmap *regmap; + const struct adp5585_regs *regs; + struct blocking_notifier_head event_notifier; + unsigned long *pin_usage; + unsigned int n_pins; + unsigned int reset2_out; + enum adp5585_variant variant; + unsigned int id; + bool has_unlock; + bool has_pin6; + int irq; + unsigned int ev_poll_time; + unsigned int unlock_time; + unsigned int unlock_keys[2]; + unsigned int nkeys_unlock; + unsigned int reset1_keys[3]; + unsigned int nkeys_reset1; + unsigned int reset2_keys[2]; + unsigned int nkeys_reset2; + u8 reset_cfg; }; #endif diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index 98567623c9df..828362b7860c 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -213,7 +213,7 @@ struct prcmu_fw_version { #if defined(CONFIG_UX500_SOC_DB8500) -static inline void prcmu_early_init(void) +static inline void __init prcmu_early_init(void) { db8500_prcmu_early_init(); } diff --git a/include/linux/mfd/macsmc.h b/include/linux/mfd/macsmc.h new file mode 100644 index 000000000000..6b13f01a8592 --- /dev/null +++ b/include/linux/mfd/macsmc.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* + * Apple SMC (System 
Management Controller) core definitions + * + * Copyright (C) The Asahi Linux Contributors + */ + +#ifndef _LINUX_MFD_MACSMC_H +#define _LINUX_MFD_MACSMC_H + +#include <linux/soc/apple/rtkit.h> + +/** + * typedef smc_key - Alias for u32 to be used for SMC keys + * + * SMC keys are 32bit integers containing packed ASCII characters in natural + * integer order, i.e. 0xAABBCCDD, which represent the FourCC ABCD. + * The SMC driver is designed with this assumption and ensures the right + * endianness is used when these are stored to memory and sent to or received + * from the actual SMC firmware (which can be done in either shared memory or + * as a 64bit mailbox message on Apple Silicon). + * Internally, SMC stores these keys in a table sorted lexicographically and + * allows resolving an index into this table to the corresponding SMC key. + * Thus, storing keys as u32 is very convenient as it allows one to, e.g., use + * normal comparison operators which directly map to the natural order used + * by SMC firmware. + * + * This simple type alias is introduced to allow easy recognition of SMC key + * variables and arguments. + */ +typedef u32 smc_key; + +/** + * SMC_KEY - Convert FourCC SMC keys in source code to smc_key + * + * This macro can be used to easily define FourCC SMC keys in source code + * and convert these to u32 / smc_key, e.g. SMC_KEY(NTAP) will expand to + * 0x4e544150. + * + * @s: FourCC SMC key to be converted + */ +#define SMC_KEY(s) (smc_key)(_SMC_KEY(#s)) +#define _SMC_KEY(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3]) + +#define APPLE_SMC_READABLE BIT(7) +#define APPLE_SMC_WRITABLE BIT(6) +#define APPLE_SMC_FUNCTION BIT(4) + +/** + * struct apple_smc_key_info - Information for an SMC key as returned by SMC + * @type_code: FourCC code indicating the type for this key.
+ * Known types: + * ch8*: ASCII string + * flag: Boolean, 1 or 0 + * flt: 32-bit single-precision IEEE 754 float + * hex: Binary data + * ioft: 64bit unsigned fixed-point integer (48.16) + * {si,ui}{8,16,32,64}: Signed/Unsigned 8-/16-/32-/64-bit integer + * @size: Size of the buffer associated with this key + * @flags: Bitfield encoding flags (APPLE_SMC_{READABLE,WRITABLE,FUNCTION}) + */ +struct apple_smc_key_info { + u32 type_code; + u8 size; + u8 flags; +}; + +/** + * enum apple_smc_boot_stage - SMC boot stage + * @APPLE_SMC_BOOTING: SMC is booting + * @APPLE_SMC_INITIALIZED: SMC is initialized and ready to use + * @APPLE_SMC_ERROR_NO_SHMEM: Shared memory could not be initialized during boot + * @APPLE_SMC_ERROR_CRASHED: SMC has crashed + */ +enum apple_smc_boot_stage { + APPLE_SMC_BOOTING, + APPLE_SMC_INITIALIZED, + APPLE_SMC_ERROR_NO_SHMEM, + APPLE_SMC_ERROR_CRASHED +}; + +/** + * struct apple_smc + * @dev: Underlying device struct for the physical backend device + * @key_count: Number of available SMC keys + * @first_key: First valid SMC key + * @last_key: Last valid SMC key + * @event_handlers: Notifier call chain for events received from SMC + * @rtk: Pointer to Apple RTKit instance + * @init_done: Completion for initialization + * @boot_stage: Current boot stage of SMC + * @sram: Pointer to SRAM resource + * @sram_base: SRAM base address + * @shmem: RTKit shared memory structure for SRAM + * @msg_id: Current message id for commands, will be incremented for each command + * @atomic_mode: Flag set when atomic mode is entered + * @atomic_pending: Flag indicating pending atomic command + * @cmd_done: Completion for command execution in non-atomic mode + * @cmd_ret: Return value from SMC for last command + * @mutex: Mutex for non-atomic mode + * @lock: Spinlock for atomic mode + */ +struct apple_smc { + struct device *dev; + + u32 key_count; + smc_key first_key; + smc_key last_key; + + struct blocking_notifier_head event_handlers; + + struct apple_rtkit *rtk; + + struct completion init_done; + enum apple_smc_boot_stage boot_stage; + + struct resource *sram; + void __iomem *sram_base; + struct apple_rtkit_shmem shmem; + + unsigned int msg_id; + + bool atomic_mode; + bool atomic_pending; + struct completion cmd_done; + u64 cmd_ret; + + struct mutex mutex; + spinlock_t lock; +}; + +/** + * apple_smc_read - Read size bytes from given SMC key into buf + * @smc: Pointer to apple_smc struct + * @key: smc_key to be read + * @buf: Buffer into which size bytes of data will be read from SMC + * @size: Number of bytes to be read into buf + * + * Return: Zero on success, negative errno on error + */ +int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size); + +/** + * apple_smc_write - Write size bytes into given SMC key from buf + * @smc: Pointer to apple_smc struct + * @key: smc_key data will be written to + * @buf: Buffer from which size bytes of data will be written to SMC + * @size: Number of bytes to be written + * + * Return: Zero on success, negative errno on error + */ +int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size); + +/** + * apple_smc_enter_atomic - Enter atomic mode to be able to use apple_smc_write_atomic + * @smc: Pointer to apple_smc struct + * + * This function switches the SMC backend to atomic mode which allows the + * use of apple_smc_write_atomic while disabling *all* other functions. + * This is only used for shutdown/reboot which requires writing to an SMC + * key from atomic context.
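A hedged usage sketch of the API documented here ('NTAP' reuses the FourCC from the SMC_KEY() example earlier; 'MBSE' is an illustrative key name only, and whether either key exists is device-specific):

/* Read a 32-bit value for key "NTAP". */
static int example_read(struct apple_smc *smc, u32 *val)
{
        return apple_smc_read(smc, SMC_KEY(NTAP), val, sizeof(*val));
}

/* Shutdown-path write: atomic mode must be entered first. */
static int example_poweroff_write(struct apple_smc *smc, u8 cmd)
{
        int ret = apple_smc_enter_atomic(smc);

        if (ret)
                return ret;

        return apple_smc_write_atomic(smc, SMC_KEY(MBSE), &cmd, sizeof(cmd));
}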
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_enter_atomic(struct apple_smc *smc);
+
+/**
+ * apple_smc_write_atomic - Write size bytes into given SMC key from buf without sleeping
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Note that this function will fail if apple_smc_enter_atomic hasn't been
+ * called before.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+
+/**
+ * apple_smc_rw - Write and then read using the given SMC key
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @wbuf: Buffer from which wsize bytes of data will be written to SMC
+ * @wsize: Number of bytes to be written
+ * @rbuf: Buffer into which rsize bytes of data will be read from SMC
+ * @rsize: Number of bytes to be read
+ *
+ * Return: Number of bytes read on success, negative errno on error
+ */
+int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize,
+		 void *rbuf, size_t rsize);
+
+/**
+ * apple_smc_get_key_by_index - Given an index, return the corresponding SMC key
+ * @smc: Pointer to apple_smc struct
+ * @index: Index to be resolved
+ * @key: Buffer for SMC key to be returned
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key);
+
+/**
+ * apple_smc_get_key_info - Get key information from SMC
+ * @smc: Pointer to apple_smc struct
+ * @key: Key to acquire information for
+ * @info: Pointer to struct apple_smc_key_info which will be filled
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info);
+
+/**
+ * apple_smc_key_exists - Check if the given SMC key exists
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be checked
+ *
+ * Return: True if the key exists, false otherwise
+ */
+static inline bool apple_smc_key_exists(struct apple_smc *smc, smc_key key)
+{
+	return apple_smc_get_key_info(smc, key, NULL) >= 0;
+}
+
+#define APPLE_SMC_TYPE_OPS(type) \
+	static inline int apple_smc_read_##type(struct apple_smc *smc, smc_key key, type *p) \
+	{ \
+		int ret = apple_smc_read(smc, key, p, sizeof(*p)); \
+		return (ret < 0) ? ret : ((ret != sizeof(*p)) ? -EINVAL : 0); \
+	} \
+	static inline int apple_smc_write_##type(struct apple_smc *smc, smc_key key, type p) \
+	{ \
+		return apple_smc_write(smc, key, &p, sizeof(p)); \
+	} \
+	static inline int apple_smc_write_##type##_atomic(struct apple_smc *smc, smc_key key, type p) \
+	{ \
+		return apple_smc_write_atomic(smc, key, &p, sizeof(p)); \
+	} \
+	static inline int apple_smc_rw_##type(struct apple_smc *smc, smc_key key, \
+					      type w, type *r) \
+	{ \
+		int ret = apple_smc_rw(smc, key, &w, sizeof(w), r, sizeof(*r)); \
+		return (ret < 0) ? ret : ((ret != sizeof(*r)) ? -EINVAL : 0); \
+	}
+
+APPLE_SMC_TYPE_OPS(u64)
+APPLE_SMC_TYPE_OPS(u32)
+APPLE_SMC_TYPE_OPS(u16)
+APPLE_SMC_TYPE_OPS(u8)
+APPLE_SMC_TYPE_OPS(s64)
+APPLE_SMC_TYPE_OPS(s32)
+APPLE_SMC_TYPE_OPS(s16)
+APPLE_SMC_TYPE_OPS(s8)
+
+static inline int apple_smc_read_flag(struct apple_smc *smc, smc_key key, bool *flag)
+{
+	u8 val;
+	int ret = apple_smc_read_u8(smc, key, &val);
+
+	if (ret < 0)
+		return ret;
+
+	*flag = val ?
true : false; + return ret; +} + +static inline int apple_smc_write_flag(struct apple_smc *smc, smc_key key, bool state) +{ + return apple_smc_write_u8(smc, key, state ? 1 : 0); +} + +static inline int apple_smc_write_flag_atomic(struct apple_smc *smc, smc_key key, bool state) +{ + return apple_smc_write_u8_atomic(smc, key, state ? 1 : 0); +} + +#endif diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h index 16543fd4d83e..021db8875963 100644 --- a/include/linux/mfd/tps6594.h +++ b/include/linux/mfd/tps6594.h @@ -19,6 +19,7 @@ enum pmic_id { TPS6593, LP8764, TPS65224, + TPS652G1, }; /* Macro to get page index from register address */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 69e110c2b86a..3e6deb00fc85 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -73,9 +73,6 @@ #define RFKILL_MINOR 242 #define MISC_DYNAMIC_MINOR 255 -struct device; -struct attribute_group; - struct miscdevice { int minor; const char *name; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 6822cfa5f4ad..9d2467f982ad 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -280,6 +280,7 @@ enum { MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25, MLX5_MKEY_MASK_FREE = 1ull << 29, + MLX5_MKEY_MASK_PAGE_SIZE_5 = 1ull << 42, MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47, }; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 939e58c2f386..86055d55836d 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -40,7 +40,7 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) -#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 0 +#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 16 #define MLX5_FS_MAX_POOL_SIZE BIT(30) enum mlx5_flow_destination_type { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 2c09df4ee574..ed4130e49c27 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -420,7 +420,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { /* Table 2170 - Flow Table Fields Supported 2 Format */ struct mlx5_ifc_flow_table_fields_supported_2_bits { - u8 reserved_at_0[0x2]; + u8 inner_l4_type_ext[0x1]; + u8 outer_l4_type_ext[0x1]; u8 inner_l4_type[0x1]; u8 outer_l4_type[0x1]; u8 reserved_at_4[0xa]; @@ -429,7 +430,11 @@ struct mlx5_ifc_flow_table_fields_supported_2_bits { u8 tunnel_header_0_1[0x1]; u8 reserved_at_11[0xf]; - u8 reserved_at_20[0x60]; + u8 reserved_at_20[0xf]; + u8 ipsec_next_header[0x1]; + u8 reserved_at_30[0x10]; + + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_flow_table_prop_layout_bits { @@ -552,6 +557,13 @@ enum { MLX5_PACKET_L4_TYPE_UDP, }; +enum { + MLX5_PACKET_L4_TYPE_EXT_NONE, + MLX5_PACKET_L4_TYPE_EXT_TCP, + MLX5_PACKET_L4_TYPE_EXT_UDP, + MLX5_PACKET_L4_TYPE_EXT_ICMP, +}; + struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; @@ -578,10 +590,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_dport[0x10]; u8 l4_type[0x2]; - u8 reserved_at_c2[0xe]; + u8 l4_type_ext[0x4]; + u8 reserved_at_c6[0xa]; u8 ipv4_ihl[0x4]; - u8 reserved_at_c4[0x4]; - + u8 reserved_at_d4[0x4]; u8 ttl_hoplimit[0x8]; u8 udp_sport[0x10]; @@ -689,10 +701,9 @@ struct mlx5_ifc_fte_match_set_misc2_bits { u8 metadata_reg_a[0x20]; u8 reserved_at_1a0[0x8]; - u8 macsec_syndrome[0x8]; u8 ipsec_syndrome[0x8]; - u8 reserved_at_1b8[0x8]; + u8 ipsec_next_header[0x8]; u8 reserved_at_1c0[0x40]; }; @@ -1846,7 +1857,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 
log_bf_reg_size[0x5]; - u8 reserved_at_270[0x3]; + u8 disciplined_fr_counter[0x1]; + u8 reserved_at_271[0x2]; u8 qp_error_syndrome[0x1]; u8 reserved_at_274[0x2]; u8 lag_dct[0x2]; @@ -2171,7 +2183,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 min_mkey_log_entity_size_fixed_buffer[0x5]; u8 ec_vf_vport_base[0x10]; - u8 reserved_at_3a0[0xa]; + u8 reserved_at_3a0[0x2]; + u8 max_mkey_log_entity_size_fixed_buffer[0x6]; + u8 reserved_at_3a8[0x2]; u8 max_mkey_log_entity_size_mtt[0x6]; u8 max_rqt_vhca_id[0x10]; @@ -9980,6 +9994,10 @@ struct mlx5_ifc_pude_reg_bits { u8 reserved_at_20[0x60]; }; +enum { + MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7, +}; + struct mlx5_ifc_ptys_reg_bits { u8 reserved_at_0[0x1]; u8 an_disable_admin[0x1]; @@ -10016,7 +10034,8 @@ struct mlx5_ifc_ptys_reg_bits { u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; - u8 reserved_at_160[0x1c]; + u8 reserved_at_160[0x8]; + u8 lane_rate_oper[0x14]; u8 connector_type[0x4]; u8 eth_proto_lp_advertise[0x20]; @@ -10460,10 +10479,19 @@ struct mlx5_ifc_pifr_reg_bits { u8 port_filter_update_en[8][0x20]; }; +enum { + MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0, + MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1, + MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2, +}; + struct mlx5_ifc_pfcc_reg_bits { - u8 reserved_at_0[0x8]; + u8 reserved_at_0[0x4]; + u8 buf_ownership[0x2]; + u8 reserved_at_6[0x2]; u8 local_port[0x8]; - u8 reserved_at_10[0xb]; + u8 reserved_at_10[0xa]; + u8 cable_length_mask[0x1]; u8 ppan_mask_n[0x1]; u8 minor_stall_mask[0x1]; u8 critical_stall_mask[0x1]; @@ -10492,7 +10520,10 @@ struct mlx5_ifc_pfcc_reg_bits { u8 device_stall_minor_watermark[0x10]; u8 device_stall_critical_watermark[0x10]; - u8 reserved_at_a0[0x60]; + u8 reserved_at_a0[0x18]; + u8 cable_length[0x8]; + + u8 reserved_at_c0[0x40]; }; struct mlx5_ifc_pelc_reg_bits { @@ -10593,11 +10624,15 @@ struct mlx5_ifc_mtutc_reg_bits { struct mlx5_ifc_pcam_enhanced_features_bits { u8 reserved_at_0[0x10]; u8 ppcnt_recovery_counters[0x1]; - u8 reserved_at_11[0xc]; + u8 reserved_at_11[0x7]; + u8 cable_length[0x1]; + u8 reserved_at_19[0x4]; u8 fec_200G_per_lane_in_pplm[0x1]; u8 reserved_at_1e[0x2a]; u8 fec_100G_per_lane_in_pplm[0x1]; - u8 reserved_at_49[0x1f]; + u8 reserved_at_49[0xa]; + u8 buffer_ownership[0x1]; + u8 resereved_at_54[0x14]; u8 fec_50G_per_lane_in_pplm[0x1]; u8 reserved_at_69[0x4]; u8 rx_icrc_encapsulated_counter[0x1]; @@ -12380,7 +12415,9 @@ struct mlx5_ifc_mtrc_ctrl_bits { struct mlx5_ifc_host_params_context_bits { u8 host_number[0x8]; - u8 reserved_at_8[0x7]; + u8 reserved_at_8[0x5]; + u8 host_pf_not_exist[0x1]; + u8 reserved_at_14[0x1]; u8 host_pf_disabled[0x1]; u8 host_num_of_vfs[0x10]; @@ -12502,17 +12539,6 @@ struct mlx5_ifc_affiliated_event_header_bits { }; enum { - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24), -}; - -enum { - MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL = BIT_ULL(0x13), -}; - -enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20, @@ -12520,10 +12546,29 @@ enum { MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47, MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53, + MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = 0x58, MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15, }; enum { + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = + 
BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = + BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_IPSEC), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = + BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_SAMPLER), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = + BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO), +}; + +enum { + MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL = + BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL - 0x40), + MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = + BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT - 0x40), +}; + +enum { MLX5_IPSEC_OBJECT_ICV_LEN_16B, }; @@ -13279,4 +13324,41 @@ struct mlx5_ifc_mrtcq_reg_bits { u8 reserved_at_80[0x180]; }; +struct mlx5_ifc_pcie_cong_event_obj_bits { + u8 modify_select_field[0x40]; + + u8 inbound_event_en[0x1]; + u8 outbound_event_en[0x1]; + u8 reserved_at_42[0x1e]; + + u8 reserved_at_60[0x1]; + u8 inbound_cong_state[0x3]; + u8 reserved_at_64[0x1]; + u8 outbound_cong_state[0x3]; + u8 reserved_at_68[0x18]; + + u8 inbound_cong_low_threshold[0x10]; + u8 inbound_cong_high_threshold[0x10]; + + u8 outbound_cong_low_threshold[0x10]; + u8 outbound_cong_high_threshold[0x10]; + + u8 reserved_at_e0[0x340]; +}; + +struct mlx5_ifc_pcie_cong_event_cmd_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj; +}; + +struct mlx5_ifc_pcie_cong_event_cmd_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr; + struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj; +}; + +enum mlx5e_pcie_cong_event_mod_field { + MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN = BIT(0), + MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2), +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index fa538feaa8d9..0c44bb8ce544 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1325,6 +1325,8 @@ static inline void get_page(struct page *page) struct folio *folio = page_folio(page); if (WARN_ON_ONCE(folio_test_slab(folio))) return; + if (WARN_ON_ONCE(folio_test_large_kmalloc(folio))) + return; folio_get(folio); } @@ -1419,7 +1421,7 @@ static inline void put_page(struct page *page) { struct folio *folio = page_folio(page); - if (folio_test_slab(folio)) + if (folio_test_slab(folio) || folio_test_large_kmalloc(folio)) return; folio_put(folio); @@ -3901,7 +3903,6 @@ enum mf_flags { int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, unsigned long count, int mf_flags); extern int memory_failure(unsigned long pfn, int flags); -extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); @@ -4178,12 +4179,12 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status); #define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL) #ifdef CONFIG_PAGE_POOL -static inline bool page_pool_page_is_pp(struct page *page) +static inline bool page_pool_page_is_pp(const struct page *page) { return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE; } #else -static inline bool page_pool_page_is_pp(struct page *page) +static inline bool page_pool_page_is_pp(const struct page *page) { return false; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d6b91e8a66d6..0f0662157066 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1070,6 +1070,11 @@ struct mm_struct { struct mutex futex_hash_lock; struct futex_private_hash __rcu *futex_phash; struct futex_private_hash 
*futex_phash_new; + /* futex-ref */ + unsigned long futex_batches; + struct rcu_head futex_rcu; + atomic_long_t futex_atomic; + unsigned int __percpu *futex_ref; #endif unsigned long hiwater_rss; /* High-watermark of RSS usage */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 7cddfdac2f57..fe3d6d98f8da 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -76,6 +76,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf +#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7 #define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8 #define SDIO_VENDOR_ID_CYPRESS 0x04b4 diff --git a/include/linux/module.h b/include/linux/module.h index 5faa1fb1f4b4..a7cac01d95e7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -14,6 +14,7 @@ #include <linux/buildid.h> #include <linux/compiler.h> #include <linux/cache.h> +#include <linux/cleanup.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/elf.h> @@ -304,7 +305,6 @@ struct notifier_block; #ifdef CONFIG_MODULES -extern int modules_disabled; /* for sysctl */ /* Get/put a kernel symbol (calls must be symmetric) */ void *__symbol_get(const char *symbol); void *__symbol_get_gpl(const char *symbol); @@ -539,7 +539,7 @@ struct module { struct trace_eval_map **trace_evals; unsigned int num_trace_evals; #endif -#ifdef CONFIG_FTRACE_MCOUNT_RECORD +#ifdef CONFIG_DYNAMIC_FTRACE unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; #endif @@ -1019,4 +1019,7 @@ static inline unsigned long find_kallsyms_symbol_value(struct module *mod, #endif /* CONFIG_MODULES && CONFIG_KALLSYMS */ +/* Define __free(module_put) macro for struct module *. */ +DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T)) + #endif /* _LINUX_MODULE_H */ diff --git a/include/linux/mount.h b/include/linux/mount.h index 1a508beba446..5f9c053b0897 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -35,9 +35,6 @@ enum mount_flags { MNT_SHRINKABLE = 0x100, MNT_WRITE_HOLD = 0x200, - MNT_SHARED = 0x1000, /* if the vfsmount is a shared mount */ - MNT_UNBINDABLE = 0x2000, /* if the vfsmount is a unbindable mount */ - MNT_INTERNAL = 0x4000, MNT_LOCK_ATIME = 0x040000, @@ -48,25 +45,15 @@ enum mount_flags { MNT_LOCKED = 0x800000, MNT_DOOMED = 0x1000000, MNT_SYNC_UMOUNT = 0x2000000, - MNT_MARKED = 0x4000000, MNT_UMOUNT = 0x8000000, - /* - * MNT_SHARED_MASK is the set of flags that should be cleared when a - * mount becomes shared. Currently, this is only the flag that says a - * mount cannot be bind mounted, since this is how we create a mount - * that shares events with another mount. If you add a new MNT_* - * flag, consider how it interacts with shared mounts. 
- */ - MNT_SHARED_MASK = MNT_UNBINDABLE, MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME | MNT_READONLY | MNT_NOSYMFOLLOW, MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME, - MNT_INTERNAL_FLAGS = MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | - MNT_LOCKED, + MNT_INTERNAL_FLAGS = MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | + MNT_SYNC_UMOUNT | MNT_LOCKED }; struct vfsmount { @@ -98,6 +85,7 @@ int mnt_get_write_access(struct vfsmount *mnt); void mnt_put_write_access(struct vfsmount *mnt); extern struct vfsmount *fc_mount(struct fs_context *fc); +extern struct vfsmount *fc_mount_longterm(struct fs_context *fc); extern struct vfsmount *vfs_create_mount(struct fs_context *fc); extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 63ef5191cc57..fddafdc168f7 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -31,6 +31,7 @@ extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); extern int ip6_mr_init(void); +extern int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb); extern void ip6_mr_cleanup(void); int ip6mr_ioctl(struct sock *sk, int cmd, void *arg); #else @@ -58,6 +59,12 @@ static inline int ip6_mr_init(void) return 0; } +static inline int +ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + return ip6_output(net, sk, skb); +} + static inline void ip6_mr_cleanup(void) { return; diff --git a/include/linux/msi.h b/include/linux/msi.h index 6863540f4b71..e5e86a8529fb 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -488,6 +488,7 @@ struct msi_domain_ops { * gets initialized to the maximum software index limit * by the domain creation code. 
* @ops: The callback data structure + * @dev: Device which creates the domain * @chip: Optional: associated interrupt chip * @chip_data: Optional: associated interrupt chip data * @handler: Optional: associated interrupt flow handler @@ -501,6 +502,7 @@ struct msi_domain_info { enum irq_domain_bus_token bus_token; unsigned int hwsize; struct msi_domain_ops *ops; + struct device *dev; struct irq_chip *chip; void *chip_data; irq_flow_handler_t handler; @@ -705,7 +707,10 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); +u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node); struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); +void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, + struct msi_desc *desc); #else /* CONFIG_PCI_MSI */ static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h index f0aa098a395f..4e694b1aabbd 100644 --- a/include/linux/mtd/nand-qpic-common.h +++ b/include/linux/mtd/nand-qpic-common.h @@ -101,6 +101,8 @@ #define ECC_SW_RESET BIT(1) #define ECC_MODE 4 #define ECC_MODE_MASK GENMASK(5, 4) +#define ECC_MODE_4BIT 0 +#define ECC_MODE_8BIT 1 #define ECC_PARITY_SIZE_BYTES_BCH 8 #define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8) #define ECC_NUM_DATA_BYTES 16 diff --git a/include/linux/mutex.h b/include/linux/mutex.h index a039fa8c1780..00afd341d293 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -126,11 +126,11 @@ do { \ #ifdef CONFIG_DEBUG_MUTEXES -int __devm_mutex_init(struct device *dev, struct mutex *lock); +int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock); #else -static inline int __devm_mutex_init(struct device *dev, struct mutex *lock) +static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock) { /* * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so @@ -141,14 +141,17 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock) #endif -#define devm_mutex_init(dev, mutex) \ +#define __mutex_init_ret(mutex) \ ({ \ typeof(mutex) mutex_ = (mutex); \ \ mutex_init(mutex_); \ - __devm_mutex_init(dev, mutex_); \ + mutex_; \ }) +#define devm_mutex_init(dev, mutex) \ + __devm_mutex_init(dev, __mutex_init_ret(mutex)) + /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h index 18824064f8c0..e58e59354e23 100644 --- a/include/linux/mux/driver.h +++ b/include/linux/mux/driver.h @@ -56,18 +56,18 @@ struct mux_control { /** * struct mux_chip - Represents a chip holding mux controllers. * @controllers: Number of mux controllers handled by the chip. - * @mux: Array of mux controllers that are handled. * @dev: Device structure. * @id: Used to identify the device internally. * @ops: Mux controller operations. + * @mux: Array of mux controllers that are handled. 
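+ *
+ * Since @mux is a flexible array bounded by @controllers, the chip and its
+ * controllers are expected to come from a single allocation; a minimal
+ * sketch of such an allocation (not necessarily the mux core's exact code):
+ *
+ *	chip = kzalloc(struct_size(chip, mux, controllers), GFP_KERNEL);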
 */
struct mux_chip {
	unsigned int controllers;
-	struct mux_control *mux;
	struct device dev;
	int id;
	const struct mux_control_ops *ops;
+	struct mux_control mux[] __counted_by(controllers);
};

 #define to_mux_chip(x) container_of((x), struct mux_chip, dev)
diff --git a/include/linux/net/intel/iidc_rdma_idpf.h b/include/linux/net/intel/iidc_rdma_idpf.h
new file mode 100644
index 000000000000..bab697e18fd6
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_idpf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Intel Corporation. */
+
+#ifndef _IIDC_RDMA_IDPF_H_
+#define _IIDC_RDMA_IDPF_H_
+
+#include <linux/auxiliary_bus.h>
+
+/* struct to be populated by core LAN PCI driver */
+struct iidc_rdma_vport_dev_info {
+	struct auxiliary_device *adev;
+	struct auxiliary_device *core_adev;
+	struct net_device *netdev;
+	u16 vport_id;
+};
+
+struct iidc_rdma_vport_auxiliary_dev {
+	struct auxiliary_device adev;
+	struct iidc_rdma_vport_dev_info *vdev_info;
+};
+
+struct iidc_rdma_vport_auxiliary_drv {
+	struct auxiliary_driver adrv;
+	void (*event_handler)(struct iidc_rdma_vport_dev_info *vdev,
+			      struct iidc_rdma_event *event);
+};
+
+/* struct to be populated by core LAN PCI driver */
+enum iidc_function_type {
+	IIDC_FUNCTION_TYPE_PF,
+	IIDC_FUNCTION_TYPE_VF,
+};
+
+struct iidc_rdma_lan_mapped_mem_region {
+	u8 __iomem *region_addr;
+	__le64 size;
+	__le64 start_offset;
+};
+
+struct iidc_rdma_priv_dev_info {
+	struct msix_entry *msix_entries;
+	u16 msix_count; /* How many vectors are reserved for this device */
+	enum iidc_function_type ftype;
+	__le16 num_memory_regions;
+	struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions;
+};
+
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up);
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+			   enum iidc_rdma_reset_type __always_unused reset_type);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+			       u8 *send_msg, u16 msg_size,
+			       u8 *recv_msg, u16 *recv_len);
+
+#endif /* _IIDC_RDMA_IDPF_H_ */
diff --git a/include/linux/net/intel/libie/adminq.h b/include/linux/net/intel/libie/adminq.h
new file mode 100644
index 000000000000..012b5d499c1a
--- /dev/null
+++ b/include/linux/net/intel/libie/adminq.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_ADMINQ_H
+#define __LIBIE_ADMINQ_H
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+#define LIBIE_CHECK_STRUCT_LEN(n, X)	\
+	static_assert((n) == sizeof(struct X))
+
+/**
+ * struct libie_aqc_generic - Generic structure used in adminq communication
+ * @param0: generic parameter high 32bit
+ * @param1: generic parameter low 32bit
+ * @addr_high: generic address high 32bit
+ * @addr_low: generic address low 32bit
+ */
+struct libie_aqc_generic {
+	__le32 param0;
+	__le32 param1;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_generic);
+
+/**
+ * struct libie_aqc_get_ver - Used in command get version (direct 0x0001)
+ * @rom_ver: rom version
+ * @fw_build: number corresponding to firmware build
+ * @fw_branch: branch identifier of firmware version
+ * @fw_major: major number of firmware version
+ * @fw_minor: minor number of firmware version
+ * @fw_patch: patch of firmware version
+ * @api_branch: branch identifier of API version
+ * @api_major: major number of API version
+ * @api_minor: minor number of API version
+ * @api_patch: patch of API version
+ */
+struct
libie_aqc_get_ver {
+	__le32 rom_ver;
+	__le32 fw_build;
+	u8 fw_branch;
+	u8 fw_major;
+	u8 fw_minor;
+	u8 fw_patch;
+	u8 api_branch;
+	u8 api_major;
+	u8 api_minor;
+	u8 api_patch;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_get_ver);
+
+/**
+ * struct libie_aqc_driver_ver - Used in command send driver version
+ * (indirect 0x0002)
+ * @major_ver: driver major version
+ * @minor_ver: driver minor version
+ * @build_ver: driver build version
+ * @subbuild_ver: driver subbuild version
+ * @reserved: reserved for future use
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ */
+struct libie_aqc_driver_ver {
+	u8 major_ver;
+	u8 minor_ver;
+	u8 build_ver;
+	u8 subbuild_ver;
+	u8 reserved[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_driver_ver);
+
+enum libie_aq_res_id {
+	LIBIE_AQC_RES_ID_NVM		= 1,
+	LIBIE_AQC_RES_ID_SDP		= 2,
+	LIBIE_AQC_RES_ID_CHNG_LOCK	= 3,
+	LIBIE_AQC_RES_ID_GLBL_LOCK	= 4,
+};
+
+enum libie_aq_res_access_type {
+	LIBIE_AQC_RES_ACCESS_READ	= 1,
+	LIBIE_AQC_RES_ACCESS_WRITE	= 2,
+};
+
+#define LIBIE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS	3000
+#define LIBIE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS	180000
+#define LIBIE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS	1000
+#define LIBIE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS	3000
+
+#define LIBIE_AQ_RES_GLBL_SUCCESS	0
+#define LIBIE_AQ_RES_GLBL_IN_PROG	1
+#define LIBIE_AQ_RES_GLBL_DONE		2
+
+/**
+ * struct libie_aqc_req_res - Request resource ownership
+ * @res_id: resource ID (look at enum definition above)
+ * @access_type: read or write (enum definition above)
+ * @timeout: Upon successful completion, FW writes this value and driver is
+ *	     expected to release resource before timeout. This value is
+ *	     provided in milliseconds.
+ * @res_number: for SDP, this is the pin ID of the SDP
+ * @status: status only used for LIBIE_AQC_RES_ID_GLBL_LOCK, for others reserved
+ * @reserved: reserved for future use
+ *
+ * Used in commands:
+ * request resource ownership (direct 0x0008)
+ * release resource ownership (direct 0x0009)
+ */
+struct libie_aqc_req_res {
+	__le16 res_id;
+	__le16 access_type;
+
+	__le32 timeout;
+	__le32 res_number;
+	__le16 status;
+	u8 reserved[2];
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_req_res);
+
+/**
+ * struct libie_aqc_list_caps - Getting capabilities
+ * @cmd_flags: command flags
+ * @pf_index: index of PF to get caps from
+ * @reserved: reserved for future use
+ * @count: number of capabilities records
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ *
+ * Used in commands:
+ * get function capabilities (indirect 0x000A)
+ * get device capabilities (indirect 0x000B)
+ */
+struct libie_aqc_list_caps {
+	u8 cmd_flags;
+	u8 pf_index;
+	u8 reserved[2];
+	__le32 count;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+#define LIBIE_AQC_CAPS_SWITCH_MODE			0x0001
+#define LIBIE_AQC_CAPS_MNG_MODE				0x0002
+#define LIBIE_AQC_CAPS_NPAR_ACTIVE			0x0003
+#define LIBIE_AQC_CAPS_OS2BMC_CAP			0x0004
+#define LIBIE_AQC_CAPS_VALID_FUNCTIONS			0x0005
+#define LIBIE_AQC_MAX_VALID_FUNCTIONS			0x8
+#define LIBIE_AQC_CAPS_SRIOV				0x0012
+#define LIBIE_AQC_CAPS_VF				0x0013
+#define LIBIE_AQC_CAPS_VMDQ				0x0014
+#define LIBIE_AQC_CAPS_8021QBG				0x0015
+#define LIBIE_AQC_CAPS_8021QBR				0x0016
+#define LIBIE_AQC_CAPS_VSI				0x0017
+#define LIBIE_AQC_CAPS_DCB				0x0018
+#define LIBIE_AQC_CAPS_FCOE				0x0021
+#define LIBIE_AQC_CAPS_ISCSI				0x0022
+#define LIBIE_AQC_CAPS_RSS 0x0040 +#define LIBIE_AQC_CAPS_RXQS 0x0041 +#define LIBIE_AQC_CAPS_TXQS 0x0042 +#define LIBIE_AQC_CAPS_MSIX 0x0043 +#define LIBIE_AQC_CAPS_VF_MSIX 0x0044 +#define LIBIE_AQC_CAPS_FD 0x0045 +#define LIBIE_AQC_CAPS_1588 0x0046 +#define LIBIE_AQC_CAPS_MAX_MTU 0x0047 +#define LIBIE_AQC_CAPS_NVM_VER 0x0048 +#define LIBIE_AQC_CAPS_PENDING_NVM_VER 0x0049 +#define LIBIE_AQC_CAPS_OROM_VER 0x004A +#define LIBIE_AQC_CAPS_PENDING_OROM_VER 0x004B +#define LIBIE_AQC_CAPS_NET_VER 0x004C +#define LIBIE_AQC_CAPS_PENDING_NET_VER 0x004D +#define LIBIE_AQC_CAPS_RDMA 0x0051 +#define LIBIE_AQC_CAPS_LED 0x0061 +#define LIBIE_AQC_CAPS_SDP 0x0062 +#define LIBIE_AQC_CAPS_MDIO 0x0063 +#define LIBIE_AQC_CAPS_WSR_PROT 0x0064 +#define LIBIE_AQC_CAPS_SENSOR_READING 0x0067 +#define LIBIE_AQC_INLINE_IPSEC 0x0070 +#define LIBIE_AQC_CAPS_NUM_ENABLED_PORTS 0x0072 +#define LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 +#define LIBIE_AQC_CAPS_NVM_MGMT 0x0080 +#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0 0x0081 +#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082 +#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083 +#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084 +#define LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 +#define LIBIE_AQC_CAPS_NAC_TOPOLOGY 0x0087 +#define LIBIE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 +#define LIBIE_AQC_BIT_ROCEV2_LAG 0x01 +#define LIBIE_AQC_BIT_SRIOV_LAG 0x02 +#define LIBIE_AQC_CAPS_FLEX10 0x00F1 +#define LIBIE_AQC_CAPS_CEM 0x00F2 + +/** + * struct libie_aqc_list_caps_elem - Getting list of caps elements + * @cap: one from the defines list above + * @major_ver: major version + * @minor_ver: minor version + * @number: number of resources described by this capability + * @logical_id: logical ID, only meaningful for some types of resources + * @phys_id: physical ID, only meaningful for some types of resources + * @rsvd1: reserved for future use + * @rsvd2: reserved for future use + */ +struct libie_aqc_list_caps_elem { + __le16 cap; + + u8 major_ver; + u8 minor_ver; + __le32 number; + __le32 logical_id; + __le32 phys_id; + __le64 rsvd1; + __le64 rsvd2; +}; +LIBIE_CHECK_STRUCT_LEN(32, libie_aqc_list_caps_elem); + +/** + * struct libie_aq_desc - Admin Queue (AQ) descriptor + * @flags: LIBIE_AQ_FLAG_* flags + * @opcode: AQ command opcode + * @datalen: length in bytes of indirect/external data buffer + * @retval: return value from firmware + * @cookie_high: opaque data high-half + * @cookie_low: opaque data low-half + * @params: command-specific parameters + * + * Descriptor format for commands the driver posts on the Admin Transmit Queue + * (ATQ). The firmware writes back onto the command descriptor and returns + * the result of the command. Asynchronous events that are not an immediate + * result of the command are written to the Admin Receive Queue (ARQ) using + * the same descriptor format. Descriptors are in little-endian notation with + * 32-bit words. 
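+ *
+ * A minimal sketch of filling a descriptor for the direct get version
+ * command (opcode 0x0001, documented above); whether a flag such as
+ * LIBIE_AQ_FLAG_SI is wanted depends on the caller:
+ *
+ *	struct libie_aq_desc desc = { };
+ *
+ *	desc.opcode = cpu_to_le16(0x0001);
+ *	desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);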
+ */ +struct libie_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + u8 raw[16]; + struct libie_aqc_generic generic; + struct libie_aqc_get_ver get_ver; + struct libie_aqc_driver_ver driver_ver; + struct libie_aqc_req_res res_owner; + struct libie_aqc_list_caps get_cap; + } params; +}; +LIBIE_CHECK_STRUCT_LEN(32, libie_aq_desc); + +/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ +#define LIBIE_AQ_LG_BUF 512 + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ +#define LIBIE_AQ_FLAG_DD BIT(0) /* 0x1 */ +#define LIBIE_AQ_FLAG_CMP BIT(1) /* 0x2 */ +#define LIBIE_AQ_FLAG_ERR BIT(2) /* 0x4 */ +#define LIBIE_AQ_FLAG_VFE BIT(3) /* 0x8 */ +#define LIBIE_AQ_FLAG_LB BIT(9) /* 0x200 */ +#define LIBIE_AQ_FLAG_RD BIT(10) /* 0x400 */ +#define LIBIE_AQ_FLAG_VFC BIT(11) /* 0x800 */ +#define LIBIE_AQ_FLAG_BUF BIT(12) /* 0x1000 */ +#define LIBIE_AQ_FLAG_SI BIT(13) /* 0x2000 */ +#define LIBIE_AQ_FLAG_EI BIT(14) /* 0x4000 */ +#define LIBIE_AQ_FLAG_FE BIT(15) /* 0x8000 */ + +/* error codes */ +enum libie_aq_err { + LIBIE_AQ_RC_OK = 0, /* Success */ + LIBIE_AQ_RC_EPERM = 1, /* Operation not permitted */ + LIBIE_AQ_RC_ENOENT = 2, /* No such element */ + LIBIE_AQ_RC_ESRCH = 3, /* Bad opcode */ + LIBIE_AQ_RC_EIO = 5, /* I/O error */ + LIBIE_AQ_RC_EAGAIN = 8, /* Try again */ + LIBIE_AQ_RC_ENOMEM = 9, /* Out of memory */ + LIBIE_AQ_RC_EACCES = 10, /* Permission denied */ + LIBIE_AQ_RC_EBUSY = 12, /* Device or resource busy */ + LIBIE_AQ_RC_EEXIST = 13, /* Object already exists */ + LIBIE_AQ_RC_EINVAL = 14, /* Invalid argument */ + LIBIE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ + LIBIE_AQ_RC_ENOSYS = 17, /* Function not implemented */ + LIBIE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + LIBIE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ + LIBIE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */ + LIBIE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ + LIBIE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ + LIBIE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ +}; + +static inline void *libie_aq_raw(struct libie_aq_desc *desc) +{ + return &desc->params.raw; +} + +const char *libie_aq_str(enum libie_aq_err err); + +#endif /* __LIBIE_ADMINQ_H */ diff --git a/include/linux/net/intel/libie/pctype.h b/include/linux/net/intel/libie/pctype.h new file mode 100644 index 000000000000..d783417fbf36 --- /dev/null +++ b/include/linux/net/intel/libie/pctype.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2025 Intel Corporation */ + +#ifndef __LIBIE_PCTYPE_H +#define __LIBIE_PCTYPE_H + +/* Packet Classifier Type indexes, used to set the xxQF_HENA registers. Also + * communicated over the virtchnl API as part of struct virtchnl_rss_hashena. + */ +enum libie_filter_pctype { + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, + LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, + LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + LIBIE_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-38 are reserved for future use. 
+ * Value 39, 40, 42 are not supported on XL710 and X710. + */ + LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, + LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, + LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + LIBIE_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + LIBIE_FILTER_PCTYPE_FCOE_OX = 48, + LIBIE_FILTER_PCTYPE_FCOE_RX = 49, + LIBIE_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + LIBIE_FILTER_PCTYPE_L2_PAYLOAD = 63 +}; + +#endif /* __LIBIE_PCTYPE_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index adb14db25798..5e5de4b0a433 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -369,6 +369,7 @@ struct napi_config { u64 irq_suspend_timeout; u32 defer_hard_irqs; cpumask_t affinity_mask; + u8 threaded; unsigned int napi_id; }; @@ -588,7 +589,9 @@ static inline bool napi_complete(struct napi_struct *n) return napi_complete_done(n, 0); } -int dev_set_threaded(struct net_device *dev, bool threaded); +void netif_threaded_enable(struct net_device *dev); +int dev_set_threaded(struct net_device *dev, + enum netdev_napi_threaded threaded); void napi_disable(struct napi_struct *n); void napi_disable_locked(struct napi_struct *n); @@ -1870,6 +1873,7 @@ enum netdev_reg_state { * @addr_len: Hardware address length * @upper_level: Maximum depth level of upper devices. * @lower_level: Maximum depth level of lower devices. + * @threaded: napi threaded state. * @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address @@ -2009,8 +2013,6 @@ enum netdev_reg_state { * switch driver and used to set the phys state of the * switch port. * - * @threaded: napi threaded mode is enabled - * * @irq_affinity_auto: driver wants the core to store and re-assign the IRQ * affinity. 
Set by netif_enable_irq_affinity(), then * the driver must create a persistent napi by @@ -2246,6 +2248,7 @@ struct net_device { unsigned char addr_len; unsigned char upper_level; unsigned char lower_level; + u8 threaded; unsigned short neigh_priv_len; unsigned short dev_id; @@ -2388,7 +2391,7 @@ struct net_device { struct dm_hw_stat_delta __rcu *dm_private; #endif struct device dev; - const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_groups[5]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; @@ -2427,7 +2430,6 @@ struct net_device { struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; - bool threaded; bool irq_affinity_auto; bool rx_cpu_rmap_auto; @@ -3016,6 +3018,16 @@ static inline void dev_dstats_rx_dropped(struct net_device *dev) u64_stats_update_end(&dstats->syncp); } +static inline void dev_dstats_rx_dropped_add(struct net_device *dev, + unsigned int packets) +{ + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + u64_stats_add(&dstats->rx_drops, packets); + u64_stats_update_end(&dstats->syncp); +} + static inline void dev_dstats_tx_add(struct net_device *dev, unsigned int len) { @@ -3306,13 +3318,6 @@ static inline struct net_device *first_net_device(struct net *net) net_device_entry(net->dev_base_head.next); } -static inline struct net_device *first_net_device_rcu(struct net *net) -{ - struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); - - return lh == &net->dev_base_head ? NULL : net_device_entry(lh); -} - int netdev_boot_setup_check(struct net_device *dev); struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, const char *hwaddr); @@ -3329,8 +3334,6 @@ int dev_get_iflink(const struct net_device *dev); int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, struct net_device_path_stack *stack); -struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, - unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); @@ -3340,7 +3343,7 @@ int netif_open(struct net_device *dev, struct netlink_ext_ack *extack); int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); void netif_close(struct net_device *dev); void dev_close(struct net_device *dev); -void dev_close_many(struct list_head *head, bool unlink); +void netif_close_many(struct list_head *head, bool unlink); void netif_disable_lro(struct net_device *dev); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); @@ -3393,6 +3396,8 @@ struct net_device *netdev_get_by_index(struct net *net, int ifindex, netdevice_tracker *tracker, gfp_t gfp); struct net_device *netdev_get_by_name(struct net *net, const char *name, netdevice_tracker *tracker, gfp_t gfp); +struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker, + unsigned short flags, unsigned short mask); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); void netdev_copy_name(struct net_device *dev, char *name); @@ -4192,7 +4197,7 @@ int generic_hwtstamp_set_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg, struct netlink_ext_ack *extack); 
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); -unsigned int dev_get_flags(const struct net_device *); +unsigned int netif_get_flags(const struct net_device *dev); int __dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); int netif_change_flags(struct net_device *dev, unsigned int flags, @@ -4207,20 +4212,20 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, struct netlink_ext_ack *extack); int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat); -int __dev_set_mtu(struct net_device *, int); +int __netif_set_mtu(struct net_device *dev, int new_mtu); int netif_set_mtu(struct net_device *dev, int new_mtu); int dev_set_mtu(struct net_device *, int); -int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, - struct netlink_ext_ack *extack); +int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr, + struct netlink_ext_ack *extack); int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss, struct netlink_ext_ack *extack); int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss, struct netlink_ext_ack *extack); int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss, struct netlink_ext_ack *extack); -int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); -int dev_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid, bool recurse); +int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); +int netif_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid, bool recurse); bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); @@ -5128,10 +5133,9 @@ void netdev_bonding_info_change(struct net_device *dev, struct netdev_bonding_info *bonding_info); #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) -void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); +void ethtool_notify(struct net_device *dev, unsigned int cmd); #else -static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, - const void *data) +static inline void ethtool_notify(struct net_device *dev, unsigned int cmd) { } #endif diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 5f896fcc074d..efbbfa770d66 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -92,6 +92,7 @@ enum nf_hook_ops_type { NF_HOOK_OP_UNDEFINED, NF_HOOK_OP_NF_TABLES, NF_HOOK_OP_BPF, + NF_HOOK_OP_NFT_FT, }; struct nf_hook_ops { diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h deleted file mode 100644 index c509ed76e714..000000000000 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _NF_CONNTRACK_DCCP_H -#define _NF_CONNTRACK_DCCP_H - -/* Exposed to userspace over nfnetlink */ -enum ct_dccp_states { - CT_DCCP_NONE, - CT_DCCP_REQUEST, - CT_DCCP_RESPOND, - CT_DCCP_PARTOPEN, - CT_DCCP_OPEN, - CT_DCCP_CLOSEREQ, - CT_DCCP_CLOSING, - CT_DCCP_TIMEWAIT, - CT_DCCP_IGNORE, - CT_DCCP_INVALID, - __CT_DCCP_MAX -}; -#define CT_DCCP_MAX (__CT_DCCP_MAX - 1) - -enum ct_dccp_roles { - CT_DCCP_ROLE_CLIENT, - CT_DCCP_ROLE_SERVER, - __CT_DCCP_ROLE_MAX -}; -#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) - -#include 
<linux/netfilter/nf_conntrack_tuple_common.h> - -struct nf_ct_dccp { - u_int8_t role[IP_CT_DIR_MAX]; - u_int8_t state; - u_int8_t last_pkt; - u_int8_t last_dir; - u_int64_t handshake_seq; -}; - -#endif /* _NF_CONNTRACK_DCCP_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index f39f688d7285..77c778d84d4c 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -51,21 +51,11 @@ static inline struct net_device *xt_in(const struct xt_action_param *par) return par->state->in; } -static inline const char *xt_inname(const struct xt_action_param *par) -{ - return par->state->in->name; -} - static inline struct net_device *xt_out(const struct xt_action_param *par) { return par->state->out; } -static inline const char *xt_outname(const struct xt_action_param *par) -{ - return par->state->out->name; -} - static inline unsigned int xt_hooknum(const struct xt_action_param *par) { return par->state->hook; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index f43f075852c0..185bd8196503 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -442,7 +442,6 @@ size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset, size_t max_size, size_t max_segs); void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq); void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error); -void netfs_queue_write_request(struct netfs_io_subrequest *subreq); int netfs_start_io_read(struct inode *inode); void netfs_end_io_read(struct inode *inode); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 0477208ed9ff..b5ea9882eda8 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -15,10 +15,7 @@ #include <linux/refcount.h> union inet_addr { - __u32 all[4]; __be32 ip; - __be32 ip6[4]; - struct in_addr in; struct in6_addr in6; }; @@ -42,6 +39,13 @@ struct netpoll { struct work_struct refill_wq; }; +#define np_info(np, fmt, ...) \ + pr_info("%s: " fmt, np->name, ##__VA_ARGS__) +#define np_err(np, fmt, ...) \ + pr_err("%s: " fmt, np->name, ##__VA_ARGS__) +#define np_notice(np, fmt, ...) 
\ + pr_notice("%s: " fmt, np->name, ##__VA_ARGS__) + struct netpoll_info { refcount_t refcnt; @@ -65,11 +69,8 @@ static inline void netpoll_poll_enable(struct net_device *dev) { return; } #endif int netpoll_send_udp(struct netpoll *np, const char *msg, int len); -void netpoll_print_options(struct netpoll *np); -int netpoll_parse_options(struct netpoll *np, char *opt); int __netpoll_setup(struct netpoll *np, struct net_device *ndev); int netpoll_setup(struct netpoll *np); -void __netpoll_cleanup(struct netpoll *np); void __netpoll_free(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); void do_netpoll_cleanup(struct netpoll *np); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index e78fa535f61d..cf3c6ab408aa 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -103,10 +103,12 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); extern void hardlockup_detector_perf_stop(void); extern void hardlockup_detector_perf_restart(void); extern void hardlockup_config_perf_event(const char *str); +extern void hardlockup_detector_perf_adjust_period(u64 period); #else static inline void hardlockup_detector_perf_stop(void) { } static inline void hardlockup_detector_perf_restart(void) { } static inline void hardlockup_config_perf_event(const char *str) { } +static inline void hardlockup_detector_perf_adjust_period(u64 period) { } #endif void watchdog_hardlockup_stop(void); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b65a1b9f2116..655d194f8e72 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -2155,7 +2155,7 @@ enum { NVME_SC_NS_NOT_ATTACHED = 0x11a, NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, NVME_SC_CTRL_LIST_INVALID = 0x11c, - NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d, + NVME_SC_SELF_TEST_IN_PROGRESS = 0x11d, NVME_SC_BP_WRITE_PROHIBITED = 0x11e, NVME_SC_CTRL_ID_INVALID = 0x11f, NVME_SC_SEC_CTRL_STATE_INVALID = 0x120, diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 6337ad4e5fe8..a480063c9cb1 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -54,6 +54,7 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, u32 bus_token); extern void of_msi_configure(struct device *dev, const struct device_node *np); +extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in); u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else static inline void of_irq_init(const struct of_device_id *matches) @@ -100,6 +101,10 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev static inline void of_msi_configure(struct device *dev, struct device_node *np) { } +static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in) +{ + return id_in; +} static inline u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in) { diff --git a/include/linux/packing.h b/include/linux/packing.h index 0589d70bbe04..20ae4d452c7b 100644 --- a/include/linux/packing.h +++ b/include/linux/packing.h @@ -5,8 +5,12 @@ #ifndef _LINUX_PACKING_H #define _LINUX_PACKING_H -#include <linux/types.h> +#include <linux/array_size.h> #include <linux/bitops.h> +#include <linux/build_bug.h> +#include <linux/minmax.h> +#include <linux/stddef.h> +#include <linux/types.h> #define GEN_PACKED_FIELD_STRUCT(__type) \ struct packed_field_ ## __type { \ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e63fbfbd5b0f..ce2bcdcadb73 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h 
@@ -751,6 +751,33 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); /** + * write_begin_get_folio - Get folio for write_begin with flags. + * @iocb: The kiocb passed from write_begin (may be NULL). + * @mapping: The address space to search. + * @index: The page cache index. + * @len: Length of data being written. + * + * This is a helper for filesystem write_begin() implementations. + * It wraps __filemap_get_folio(), setting appropriate flags in + * the write begin context. + * + * Return: A folio or an ERR_PTR. + */ +static inline struct folio *write_begin_get_folio(const struct kiocb *iocb, + struct address_space *mapping, pgoff_t index, size_t len) +{ + fgf_t fgp_flags = FGP_WRITEBEGIN; + + fgp_flags |= fgf_set_order(len); + + if (iocb && iocb->ki_flags & IOCB_DONTCACHE) + fgp_flags |= FGP_DONTCACHE; + + return __filemap_get_folio(mapping, index, fgp_flags, + mapping_gfp_mask(mapping)); +} + +/** * filemap_get_folio - Find and get a folio. * @mapping: The address_space to search. * @index: The page index. diff --git a/include/linux/panic.h b/include/linux/panic.h index 4adc65766935..7be742628c25 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -3,6 +3,7 @@ #define _LINUX_PANIC_H #include <linux/compiler_attributes.h> +#include <linux/stdarg.h> #include <linux/types.h> struct pt_regs; @@ -10,6 +11,8 @@ struct pt_regs; extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) __noreturn __cold; +__printf(1, 0) +void vpanic(const char *fmt, va_list args) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); void check_panic_on_warn(const char *origin); extern void oops_enter(void); @@ -25,8 +28,6 @@ extern int panic_on_warn; extern unsigned long panic_on_taint; extern bool panic_on_taint_nousertaint; -extern int sysctl_panic_on_rcu_stall; -extern int sysctl_max_rcu_stall_to_panic; extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index e2d71b6fdd84..92ffc4373f6d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2624,6 +2624,9 @@ #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 +#define PCI_VENDOR_ID_RPI 0x1de4 +#define PCI_DEVICE_ID_RPI_RP1_C0 0x0001 + #define PCI_VENDOR_ID_ALIBABA 0x1ded #define PCI_VENDOR_ID_CXL 0x1e98 diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 6dc5e0cd76ca..93c9a26492fc 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -70,6 +70,11 @@ struct pmu_hw_events { struct arm_pmu *percpu_pmu; int irq; + + struct perf_branch_stack *branch_stack; + + /* Active events requesting branch records */ + unsigned int branch_users; }; enum armpmu_attr_groups { @@ -115,6 +120,7 @@ struct arm_pmu { /* PMUv3 only */ int pmuver; u64 reg_pmmir; + u64 reg_brbidr; #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); #define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000 @@ -126,6 +132,8 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) +DECLARE_PER_CPU(struct arm_pmu *, cpu_armpmu); + u64 armpmu_event_update(struct perf_event *event); int armpmu_event_set_period(struct perf_event *event); diff --git a/include/linux/phy.h b/include/linux/phy.h index e194dad1623d..4c2b8b6e7187 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -103,6 +103,10 @@ extern const int 
phy_basic_ports_array[3];
  * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
  * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
  * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
+ * @PHY_INTERFACE_MODE_50GBASER: 50GBase-R - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_LAUI: 50 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_100GBASEP: 100GBase-P - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_MIILITE: MII-Lite - MII without RXER TXER CRS COL
  * @PHY_INTERFACE_MODE_MAX: Book keeping
  *
  * Describes the interface between the MAC and PHY.
@@ -144,6 +148,10 @@ typedef enum {
 	PHY_INTERFACE_MODE_QUSGMII,
 	PHY_INTERFACE_MODE_1000BASEKX,
 	PHY_INTERFACE_MODE_10G_QXGMII,
+	PHY_INTERFACE_MODE_50GBASER,
+	PHY_INTERFACE_MODE_LAUI,
+	PHY_INTERFACE_MODE_100GBASEP,
+	PHY_INTERFACE_MODE_MIILITE,
 	PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;

@@ -260,6 +268,14 @@ static inline const char *phy_modes(phy_interface_t interface)
 		return "qusgmii";
 	case PHY_INTERFACE_MODE_10G_QXGMII:
 		return "10g-qxgmii";
+	case PHY_INTERFACE_MODE_50GBASER:
+		return "50gbase-r";
+	case PHY_INTERFACE_MODE_LAUI:
+		return "laui";
+	case PHY_INTERFACE_MODE_100GBASEP:
+		return "100gbase-p";
+	case PHY_INTERFACE_MODE_MIILITE:
+		return "mii-lite";
 	default:
 		return "unknown";
 	}
@@ -269,8 +285,10 @@ static inline const char *phy_modes(phy_interface_t interface)
  * rgmii_clock - map link speed to the clock rate
  * @speed: link speed value
  *
- * Description: maps RGMII supported link speeds
- * into the clock rates.
+ * Description: maps RGMII supported link speeds into the clock rates.
+ * This can also be used for MII, GMII, and RMII interface modes as the
+ * clock rates are identical, but the caller must be aware that errors
+ * for unsupported clock rates will not be signalled.
  *
  * Returns: clock rate or negative errno
  */
@@ -395,8 +413,10 @@ struct mii_bus {
 	/** @shared_lock: protect access to the shared element */
 	struct mutex shared_lock;

+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
 	/** @shared: shared state across different PHYs */
 	struct phy_package_shared *shared[PHY_MAX_ADDR];
+#endif
 };
 #define to_mii_bus(d) container_of(d, struct mii_bus, dev)

@@ -526,6 +546,7 @@ struct macsec_ops;
  * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
  * @wol_enabled: Set to true if the PHY or the attached MAC has Wake-on-LAN
  *		 enabled.
+ * @is_genphy_driven: PHY is driven by one of the generic PHY drivers
  * @state: State of the PHY for management purposes
  * @dev_flags: Device-specific flags used by the PHY driver.
  *
@@ -629,6 +650,7 @@ struct phy_device {
 	unsigned is_on_sfp_module:1;
 	unsigned mac_managed_pm:1;
 	unsigned wol_enabled:1;
+	unsigned is_genphy_driven:1;

 	unsigned autoneg:1;
 	/* The most recently read link state */
@@ -702,9 +724,11 @@ struct phy_device {
 	/* For use by PHYs to maintain extra state */
 	void *priv;

+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
 	/* shared data pointer */
 	/* For use by PHYs inside the same package that need a shared state.
*/ struct phy_package_shared *shared; +#endif /* Reporting cable test results */ struct sk_buff *skb; @@ -1292,6 +1316,17 @@ static inline bool phy_is_started(struct phy_device *phydev) } /** + * phy_driver_is_genphy - Convenience function to check whether PHY is driven + * by one of the generic PHY drivers + * @phydev: The phy_device struct + * Return: true if PHY is driven by one of the genphy drivers + */ +static inline bool phy_driver_is_genphy(struct phy_device *phydev) +{ + return phydev->is_genphy_driven; +} + +/** * phy_disable_eee_mode - Don't advertise an EEE mode. * @phydev: The phy_device struct * @link_mode: The EEE mode to be disabled @@ -1941,9 +1976,6 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); -/* Generic C45 PHY driver */ -extern struct phy_driver genphy_c45_driver; - /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); @@ -1997,8 +2029,8 @@ bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); -s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, - const int *delay_values, int size, bool is_rx); +s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values, + int size, bool is_rx); int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev, enum ethtool_link_mode_bit_indices linkmode, @@ -2096,7 +2128,4 @@ module_exit(phy_module_exit) #define module_phy_driver(__phy_drivers) \ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) -bool phy_driver_is_genphy(struct phy_device *phydev); -bool phy_driver_is_genphy_10g(struct phy_device *phydev); - #endif /* __PHY_H */ diff --git a/include/linux/pid.h b/include/linux/pid.h index 453ae6d8a68d..003a1027d219 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -47,19 +47,23 @@ #define RESERVED_PIDS 300 +struct pidfs_attr; + struct upid { int nr; struct pid_namespace *ns; }; -struct pid -{ +struct pid { refcount_t count; unsigned int level; spinlock_t lock; - struct dentry *stashed; - u64 ino; - struct rb_node pidfs_node; + struct { + u64 ino; + struct rb_node pidfs_node; + struct dentry *stashed; + struct pidfs_attr *attr; + }; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; struct hlist_head inodes; diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h index 77e7db194914..3e08c33da2df 100644 --- a/include/linux/pidfs.h +++ b/include/linux/pidfs.h @@ -14,7 +14,6 @@ void pidfs_coredump(const struct coredump_params *cprm); #endif extern const struct dentry_operations pidfs_dentry_operations; int pidfs_register_pid(struct pid *pid); -void pidfs_get_pid(struct pid *pid); -void pidfs_put_pid(struct pid *pid); +void pidfs_free_pid(struct pid *pid); #endif /* _LINUX_PID_FS_H */ diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h deleted file mode 100644 index 2f1b952d596a..000000000000 --- a/include/linux/pktcdvd.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright (C) 2000 Jens Axboe <axboe@suse.de> - * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com> - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and - * DVD-RW devices. 
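Stepping back to the pagemap.h hunk above, here is a minimal sketch of how a filesystem ->write_begin() implementation might use the new write_begin_get_folio() helper. The myfs_* names are invented, and the prototype assumes the ->write_begin() variant that receives the kiocb mentioned in the helper's kernel-doc:

        static int myfs_write_begin(const struct kiocb *iocb,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned int len,
                                    struct folio **foliop, void **fsdata)
        {
                struct folio *folio;

                /* Honours IOCB_DONTCACHE and picks a suitable folio order. */
                folio = write_begin_get_folio(iocb, mapping, pos >> PAGE_SHIFT, len);
                if (IS_ERR(folio))
                        return PTR_ERR(folio);

                *foliop = folio;
                return 0;
        }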
- * - */ -#ifndef __PKTCDVD_H -#define __PKTCDVD_H - -#include <linux/blkdev.h> -#include <linux/completion.h> -#include <linux/cdrom.h> -#include <linux/kobject.h> -#include <linux/sysfs.h> -#include <linux/mempool.h> -#include <uapi/linux/pktcdvd.h> - -/* default bio write queue congestion marks */ -#define PKT_WRITE_CONGESTION_ON 10000 -#define PKT_WRITE_CONGESTION_OFF 9000 - - -struct packet_settings -{ - __u32 size; /* packet size in (512 byte) sectors */ - __u8 fp; /* fixed packets */ - __u8 link_loss; /* the rest is specified - * as per Mt Fuji */ - __u8 write_type; - __u8 track_mode; - __u8 block_mode; -}; - -/* - * Very crude stats for now - */ -struct packet_stats -{ - unsigned long pkt_started; - unsigned long pkt_ended; - unsigned long secs_w; - unsigned long secs_rg; - unsigned long secs_r; -}; - -struct packet_cdrw -{ - struct list_head pkt_free_list; - struct list_head pkt_active_list; - spinlock_t active_list_lock; /* Serialize access to pkt_active_list */ - struct task_struct *thread; - atomic_t pending_bios; -}; - -/* - * Switch to high speed reading after reading this many kilobytes - * with no interspersed writes. - */ -#define HI_SPEED_SWITCH 512 - -struct packet_iosched -{ - atomic_t attention; /* Set to non-zero when queue processing is needed */ - int writing; /* Non-zero when writing, zero when reading */ - spinlock_t lock; /* Protecting read/write queue manipulations */ - struct bio_list read_queue; - struct bio_list write_queue; - sector_t last_write; /* The sector where the last write ended */ - int successive_reads; -}; - -/* - * 32 buffers of 2048 bytes - */ -#if (PAGE_SIZE % CD_FRAMESIZE) != 0 -#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE" -#endif -#define PACKET_MAX_SIZE 128 -#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE) -#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9) - -enum packet_data_state { - PACKET_IDLE_STATE, /* Not used at the moment */ - PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */ - /* we don't have to do as much */ - /* data gathering */ - PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */ - PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */ - PACKET_RECOVERY_STATE, /* Recover after read/write errors */ - PACKET_FINISHED_STATE, /* After write has finished */ - - PACKET_NUM_STATES /* Number of possible states */ -}; - -/* - * Information needed for writing a single packet - */ -struct pktcdvd_device; - -struct packet_data -{ - struct list_head list; - - spinlock_t lock; /* Lock protecting state transitions and */ - /* orig_bios list */ - - struct bio_list orig_bios; /* Original bios passed to pkt_make_request */ - /* that will be handled by this packet */ - int write_size; /* Total size of all bios in the orig_bios */ - /* list, measured in number of frames */ - - struct bio *w_bio; /* The bio we will send to the real CD */ - /* device once we have all data for the */ - /* packet we are going to write */ - sector_t sector; /* First sector in this packet */ - int frames; /* Number of frames in this packet */ - - enum packet_data_state state; /* Current state */ - atomic_t run_sm; /* Incremented whenever the state */ - /* machine needs to be run */ - long sleep_time; /* Set this to non-zero to make the state */ - /* machine run after this many jiffies. 
*/ - - atomic_t io_wait; /* Number of pending IO operations */ - atomic_t io_errors; /* Number of read/write errors during IO */ - - struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */ - struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE]; - - int cache_valid; /* If non-zero, the data for the zone defined */ - /* by the sector variable is completely cached */ - /* in the pages[] vector. */ - - int id; /* ID number for debugging */ - struct pktcdvd_device *pd; -}; - -struct pkt_rb_node { - struct rb_node rb_node; - struct bio *bio; -}; - -struct packet_stacked_data -{ - struct bio *bio; /* Original read request bio */ - struct pktcdvd_device *pd; -}; -#define PSD_POOL_SIZE 64 - -struct pktcdvd_device -{ - struct file *bdev_file; /* dev attached */ - /* handle acquired for bdev during pkt_open_dev() */ - struct file *f_open_bdev; - dev_t pkt_dev; /* our dev */ - struct packet_settings settings; - struct packet_stats stats; - int refcnt; /* Open count */ - int write_speed; /* current write speed, kB/s */ - int read_speed; /* current read speed, kB/s */ - unsigned long offset; /* start offset */ - __u8 mode_offset; /* 0 / 8 */ - __u8 type; - unsigned long flags; - __u16 mmc3_profile; - __u32 nwa; /* next writable address */ - __u32 lra; /* last recorded address */ - struct packet_cdrw cdrw; - wait_queue_head_t wqueue; - - spinlock_t lock; /* Serialize access to bio_queue */ - struct rb_root bio_queue; /* Work queue of bios we need to handle */ - int bio_queue_size; /* Number of nodes in bio_queue */ - bool congested; /* Someone is waiting for bio_queue_size - * to drop. */ - sector_t current_sector; /* Keep track of where the elevator is */ - atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */ - /* needs to be run. */ - mempool_t rb_pool; /* mempool for pkt_rb_node allocations */ - - struct packet_iosched iosched; - struct gendisk *disk; - - int write_congestion_off; - int write_congestion_on; - - struct device *dev; /* sysfs pktcdvd[0-7] dev */ - - struct dentry *dfs_d_root; /* debugfs: devname directory */ - struct dentry *dfs_f_info; /* debugfs: info file */ -}; - -#endif /* __PKTCDVD_H */ diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 1f4e4f2b89bb..c19b404e3d8d 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -2388,6 +2388,12 @@ enum motionsense_command { */ MOTIONSENSE_CMD_SENSOR_SCALE = 18, + /* + * Activity management + * Retrieve current status of given activity. + */ + MOTIONSENSE_CMD_GET_ACTIVITY = 20, + /* Number of motionsense sub-commands. */ MOTIONSENSE_NUM_CMDS }; @@ -2447,6 +2453,11 @@ enum motionsensor_orientation { MOTIONSENSE_ORIENTATION_UNKNOWN = 4, }; +struct ec_response_activity_data { + uint8_t activity; /* motionsensor_activity */ + uint8_t state; +} __ec_todo_packed; + struct ec_response_motion_sensor_data { /* Flags for each sensor. 
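As an aside on the motionsense additions in the cros_ec_commands.h hunk, a hypothetical host-side request using the new sub-command and the get_activity parameter layout added later in this hunk; the EC transport call is omitted and the sensor number is arbitrary:

        struct ec_params_motion_sense req = {
                .cmd = MOTIONSENSE_CMD_GET_ACTIVITY,
                .get_activity = {
                        .sensor_num = 0,
                        .activity = MOTIONSENSE_ACTIVITY_BODY_DETECTION,
                },
        };
        /* The EC replies in ec_response_motion_sense.get_activity.state. */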
*/ uint8_t flags; @@ -2460,8 +2471,7 @@ struct ec_response_motion_sensor_data { uint32_t timestamp; }; struct __ec_todo_unpacked { - uint8_t activity; /* motionsensor_activity */ - uint8_t state; + struct ec_response_activity_data activity_data; int16_t add_info[2]; }; }; @@ -2494,6 +2504,7 @@ enum motionsensor_activity { MOTIONSENSE_ACTIVITY_SIG_MOTION = 1, MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2, MOTIONSENSE_ACTIVITY_ORIENTATION = 3, + MOTIONSENSE_ACTIVITY_BODY_DETECTION = 4, }; struct ec_motion_sense_activity { @@ -2671,6 +2682,7 @@ struct ec_params_motion_sense { uint32_t max_data_vector; } fifo_read; + /* Used for MOTIONSENSE_CMD_SET_ACTIVITY */ struct ec_motion_sense_activity set_activity; /* Used for MOTIONSENSE_CMD_LID_ANGLE */ @@ -2716,6 +2728,12 @@ struct ec_params_motion_sense { */ int16_t hys_degree; } tablet_mode_threshold; + + /* Used for MOTIONSENSE_CMD_GET_ACTIVITY */ + struct __ec_todo_unpacked { + uint8_t sensor_num; + uint8_t activity; /* enum motionsensor_activity */ + } get_activity; }; } __ec_todo_packed; @@ -2833,6 +2851,10 @@ struct ec_response_motion_sense { uint16_t hys_degree; } tablet_mode_threshold; + /* Used for MOTIONSENSE_CMD_GET_ACTIVITY. */ + struct __ec_todo_unpacked { + uint8_t state; + } get_activity; }; } __ec_todo_packed; diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h index 0e0e8fe6975f..028781ad4059 100644 --- a/include/linux/platform_data/microchip-ksz.h +++ b/include/linux/platform_data/microchip-ksz.h @@ -23,6 +23,7 @@ #include <linux/platform_data/dsa.h> enum ksz_chip_id { + KSZ8463_CHIP_ID = 0x8463, KSZ8563_CHIP_ID = 0x8563, KSZ8795_CHIP_ID = 0x8795, KSZ8794_CHIP_ID = 0x8794, diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h index 6333bac166a5..38c24c77ba43 100644 --- a/include/linux/platform_data/video-pxafb.h +++ b/include/linux/platform_data/video-pxafb.h @@ -150,7 +150,6 @@ struct pxafb_mach_info { }; void pxa_set_fb_info(struct device *, struct pxafb_mach_info *); -unsigned long pxafb_get_hsync_time(struct device *dev); /* smartpanel related */ #define SMART_CMD_A0 (0x1 << 8) diff --git a/include/linux/pm.h b/include/linux/pm.h index f0bd8fbae4f2..cc7b2dc28574 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -8,14 +8,15 @@ #ifndef _LINUX_PM_H #define _LINUX_PM_H +#include <linux/completion.h> #include <linux/export.h> -#include <linux/list.h> -#include <linux/workqueue.h> +#include <linux/hrtimer_types.h> +#include <linux/mutex.h> #include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/util_macros.h> #include <linux/wait.h> -#include <linux/timer.h> -#include <linux/hrtimer.h> -#include <linux/completion.h> +#include <linux/workqueue_types.h> /* * Callbacks for platform drivers to implement. @@ -683,6 +684,7 @@ struct dev_pm_info { bool smart_suspend:1; /* Owned by the PM core */ bool must_resume:1; /* Owned by the PM core */ bool may_skip_resume:1; /* Set by subsystems */ + bool strict_midlayer:1; #else bool should_wakeup:1; #endif @@ -720,6 +722,7 @@ struct dev_pm_info { struct pm_subsys_data *subsys_data; /* Owned by the subsystem.
*/ void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; + bool detach_power_off:1; /* Owned by the driver core */ }; extern int dev_pm_get_subsys_data(struct device *dev); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 0b18160901a2..c84edf217819 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -36,10 +36,16 @@ * isn't specified, the index just follows the * index for the attached PM domain. * + * PD_FLAG_ATTACH_POWER_ON: Power on the domain during attach. + * + * PD_FLAG_DETACH_POWER_OFF: Power off the domain during detach. + * */ #define PD_FLAG_NO_DEV_LINK BIT(0) #define PD_FLAG_DEV_LINK_ON BIT(1) #define PD_FLAG_REQUIRED_OPP BIT(2) +#define PD_FLAG_ATTACH_POWER_ON BIT(3) +#define PD_FLAG_DETACH_POWER_OFF BIT(4) struct dev_pm_domain_attach_data { const char * const *pd_names; @@ -104,6 +110,11 @@ struct dev_pm_domain_list { * GENPD_FLAG_DEV_NAME_FW: Instructs genpd to generate an unique device name * using ida. It is used by genpd providers which * get their genpd-names directly from FW. + * + * GENPD_FLAG_NO_SYNC_STATE: The ->sync_state() support is implemented in a + * genpd provider specific way, likely through a + * parent device node. This flag makes genpd + * skip its internal support for this. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -114,6 +125,7 @@ struct dev_pm_domain_list { #define GENPD_FLAG_MIN_RESIDENCY (1U << 6) #define GENPD_FLAG_OPP_TABLE_FW (1U << 7) #define GENPD_FLAG_DEV_NAME_FW (1U << 8) +#define GENPD_FLAG_NO_SYNC_STATE (1U << 9) enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ @@ -127,6 +139,12 @@ enum genpd_notication { GENPD_NOTIFY_ON, }; +enum genpd_sync_state { + GENPD_SYNC_STATE_OFF = 0, + GENPD_SYNC_STATE_SIMPLE, + GENPD_SYNC_STATE_ONECELL, +}; + struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); bool (*suspend_ok)(struct device *dev); @@ -187,6 +205,8 @@ struct generic_pm_domain { unsigned int performance_state; /* Aggregated max performance state */ cpumask_var_t cpus; /* A cpumask of the attached CPUs */ bool synced_poweroff; /* A consumer needs a synced poweroff */ + bool stay_on; /* Stay powered-on during boot. */ + enum genpd_sync_state sync_state; /* How sync_state is managed.
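For illustration, how a consumer driver might use the new attach-time flags together with the dev_pm_domain_attach() signature change further down in this series (the bool power_on argument becomes a u32 flags word); the mydrv_* context is invented:

        static int mydrv_probe(struct device *dev)
        {
                int ret;

                /* Previously: dev_pm_domain_attach(dev, true). */
                ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
                                                PD_FLAG_DETACH_POWER_OFF);
                if (ret)
                        return ret;

                /* ... normal probe work ... */
                return 0;
        }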
*/ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); struct raw_notifier_head power_notifiers; /* Power on/off notifiers */ @@ -301,6 +321,7 @@ void dev_pm_genpd_synced_poweroff(struct device *dev); int dev_pm_genpd_set_hwmode(struct device *dev, bool enable); bool dev_pm_genpd_get_hwmode(struct device *dev); int dev_pm_genpd_rpm_always_on(struct device *dev, bool on); +bool dev_pm_genpd_is_on(struct device *dev); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -393,6 +414,11 @@ static inline int dev_pm_genpd_rpm_always_on(struct device *dev, bool on) return -EOPNOTSUPP; } +static inline bool dev_pm_genpd_is_on(struct device *dev) +{ + return false; +} + #define simple_qos_governor (*(struct dev_power_governor *)(NULL)) #define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL)) #endif @@ -431,6 +457,7 @@ int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec, struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); +void of_genpd_sync_state(struct device_node *np); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -476,6 +503,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, return -ENODEV; } +static inline void of_genpd_sync_state(struct device_node *np) {} + static inline int genpd_dev_pm_attach(struct device *dev) { return 0; @@ -501,7 +530,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ #ifdef CONFIG_PM -int dev_pm_domain_attach(struct device *dev, bool power_on); +int dev_pm_domain_attach(struct device *dev, u32 flags); struct device *dev_pm_domain_attach_by_id(struct device *dev, unsigned int index); struct device *dev_pm_domain_attach_by_name(struct device *dev, @@ -518,7 +547,7 @@ int dev_pm_domain_start(struct device *dev); void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state); #else -static inline int dev_pm_domain_attach(struct device *dev, bool power_on) +static inline int dev_pm_domain_attach(struct device *dev, u32 flags) { return 0; } diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index e7cb70fcc0af..d88d6b6ccf5b 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -66,9 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work) extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); -extern bool pm_runtime_need_not_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); -extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); @@ -257,9 +255,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; } static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } -static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } -static inline int pm_runtime_force_resume(struct device *dev) { return 
0; } static inline int __pm_runtime_idle(struct device *dev, int rpmflags) { @@ -330,6 +326,18 @@ static inline void pm_runtime_release_supplier(struct device_link *link) {} #endif /* !CONFIG_PM */ +#ifdef CONFIG_PM_SLEEP + +bool pm_runtime_need_not_resume(struct device *dev); +int pm_runtime_force_resume(struct device *dev); + +#else /* !CONFIG_PM_SLEEP */ + +static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } +static inline int pm_runtime_force_resume(struct device *dev) { return -ENXIO; } + +#endif /* CONFIG_PM_SLEEP */ + /** * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it. * @dev: Target device. @@ -337,6 +345,20 @@ static inline void pm_runtime_release_supplier(struct device_link *link) {} * Invoke the "idle check" callback of @dev and, depending on its return value, * set up autosuspend of @dev or suspend it (depending on whether or not * autosuspend has been enabled for it). + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero, Runtime PM status change ongoing + * or device not in %RPM_ACTIVE state. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. + * Other values and conditions for the above values are possible as returned by + * Runtime PM idle and suspend callbacks. */ static inline int pm_runtime_idle(struct device *dev) { @@ -346,6 +368,18 @@ static inline int pm_runtime_idle(struct device *dev) /** * pm_runtime_suspend - Suspend a device synchronously. * @dev: Target device. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. + * Other values and conditions for the above values are possible as returned by + * Runtime PM suspend callbacks. */ static inline int pm_runtime_suspend(struct device *dev) { @@ -353,14 +387,29 @@ static inline int pm_runtime_suspend(struct device *dev) } /** - * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it. + * pm_runtime_autosuspend - Update the last access time and set up autosuspend + * of a device. * @dev: Target device. * - * Set up autosuspend of @dev or suspend it (depending on whether or not - * autosuspend is enabled for it) without engaging its "idle check" callback. + * First update the last access time, then set up autosuspend of @dev or suspend + * it (depending on whether or not autosuspend is enabled for it) without + * engaging its "idle check" callback. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. + * Other values and conditions for the above values are possible as returned by + * Runtime PM suspend callbacks. 
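A sketch of the practical effect on drivers: because the autosuspend wrappers (here and in the put variants below) now call pm_runtime_mark_last_busy() themselves, the traditional two-call idiom collapses to one. The mydrv_io_done() completion path is hypothetical:

        static void mydrv_io_done(struct device *dev)
        {
                /*
                 * Old idiom:
                 *      pm_runtime_mark_last_busy(dev);
                 *      pm_runtime_put_autosuspend(dev);
                 * The helper now updates the last access time internally.
                 */
                pm_runtime_put_autosuspend(dev);
        }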
*/ static inline int pm_runtime_autosuspend(struct device *dev) { + pm_runtime_mark_last_busy(dev); return __pm_runtime_suspend(dev, RPM_AUTO); } @@ -379,6 +428,18 @@ static inline int pm_runtime_resume(struct device *dev) * * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev * asynchronously. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero, Runtime PM status change ongoing + * or device not in %RPM_ACTIVE state. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. */ static inline int pm_request_idle(struct device *dev) { @@ -395,14 +456,27 @@ static inline int pm_request_resume(struct device *dev) } /** - * pm_request_autosuspend - Queue up autosuspend of a device. + * pm_request_autosuspend - Update the last access time and queue up autosuspend + * of a device. * @dev: Target device. * - * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev - * asynchronously. + * Update the last access time of a device and queue up a work item to run an + * equivalent pm_runtime_autosuspend() for @dev asynchronously. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. */ static inline int pm_request_autosuspend(struct device *dev) { + pm_runtime_mark_last_busy(dev); return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); } @@ -464,6 +538,17 @@ static inline int pm_runtime_resume_and_get(struct device *dev) * * Decrement the runtime PM usage counter of @dev and if it turns out to be * equal to 0, queue up a work item for @dev like in pm_request_idle(). + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. */ static inline int pm_runtime_put(struct device *dev) { @@ -478,6 +563,17 @@ DEFINE_FREE(pm_runtime_put, struct device *, if (_T) pm_runtime_put(_T)) * * Decrement the runtime PM usage counter of @dev and if it turns out to be * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. */ static inline int __pm_runtime_put_autosuspend(struct device *dev) { @@ -485,16 +581,29 @@ static inline int __pm_runtime_put_autosuspend(struct device *dev) } /** - * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. 
+ * pm_runtime_put_autosuspend - Update the last access time of a device, drop + * its usage counter and queue autosuspend if the usage counter becomes 0. * @dev: Target device. * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). + * Update the last access time of @dev, decrement runtime PM usage counter of + * @dev and if it turns out to be equal to 0, queue up a work item for @dev like + * in pm_request_autosuspend(). + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. */ static inline int pm_runtime_put_autosuspend(struct device *dev) { - return __pm_runtime_suspend(dev, - RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); + pm_runtime_mark_last_busy(dev); + return __pm_runtime_put_autosuspend(dev); } /** @@ -506,9 +615,20 @@ static inline int pm_runtime_put_autosuspend(struct device *dev) * return value, set up autosuspend of @dev or suspend it (depending on whether * or not autosuspend has been enabled for it). * - * The possible return values of this function are the same as for - * pm_runtime_idle() and the runtime PM usage counter of @dev remains - * decremented in all cases, even if it returns an error code. + * The runtime PM usage counter of @dev remains decremented in all cases, even + * if it returns an error code. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. + * Other values and conditions for the above values are possible as returned by + * Runtime PM suspend callbacks. */ static inline int pm_runtime_put_sync(struct device *dev) { @@ -522,9 +642,21 @@ static inline int pm_runtime_put_sync(struct device *dev) * Decrement the runtime PM usage counter of @dev and if it turns out to be * equal to 0, carry out runtime-suspend of @dev synchronously. * - * The possible return values of this function are the same as for - * pm_runtime_suspend() and the runtime PM usage counter of @dev remains - * decremented in all cases, even if it returns an error code. + * The runtime PM usage counter of @dev remains decremented in all cases, even + * if it returns an error code. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. + * Other values and conditions for the above values are possible as returned by + * Runtime PM suspend callbacks. */ static inline int pm_runtime_put_sync_suspend(struct device *dev) { @@ -532,19 +664,34 @@ static inline int pm_runtime_put_sync_suspend(struct device *dev) } /** - * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
+ * pm_runtime_put_sync_autosuspend - Update the last access time of a device, + * drop device usage counter and autosuspend if 0. * @dev: Target device. * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending - * on whether or not autosuspend has been enabled for it). + * Update the last access time of @dev, decrement the runtime PM usage counter + * of @dev and if it turns out to be equal to 0, set up autosuspend of @dev or + * suspend it synchronously (depending on whether or not autosuspend has been + * enabled for it). * - * The possible return values of this function are the same as for - * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains - * decremented in all cases, even if it returns an error code. + * The runtime PM usage counter of @dev remains decremented in all cases, even + * if it returns an error code. + * + * Return: + * * 0: Success. + * * -EINVAL: Runtime PM error. + * * -EACCES: Runtime PM disabled. + * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing. + * * -EBUSY: Runtime PM child_count non-zero. + * * -EPERM: Device PM QoS resume latency 0. + * * -EINPROGRESS: Suspend already in progress. + * * -ENOSYS: CONFIG_PM not enabled. + * * 1: Device already suspended. + * Other values and conditions for the above values are possible as returned by + * Runtime PM suspend callbacks. */ static inline int pm_runtime_put_sync_autosuspend(struct device *dev) { + pm_runtime_mark_last_busy(dev); return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); } diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index dd48c64b605e..4d3dbcef379e 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -37,6 +37,11 @@ static inline int clockid_to_fd(const clockid_t clk) return ~(clk >> 3); } +static inline bool clockid_aux_valid(clockid_t id) +{ + return IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS) && id >= CLOCK_AUX && id <= CLOCK_AUX_LAST; +} + #ifdef CONFIG_POSIX_TIMERS #include <linux/signal_types.h> diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 7803edaa8ff8..0cca01b5607b 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -888,15 +888,23 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } extern int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); +int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val); #if IS_ENABLED(CONFIG_POWER_SUPPLY) extern int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val); +int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val); #else static inline int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { return 0; } +static inline int power_supply_set_property_direct(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ return 0; } #endif extern void power_supply_external_power_changed(struct power_supply *psy); diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index c7abce28ed29..aab0aebb529e 100644 --- a/include/linux/pps_kernel.h +++ 
b/include/linux/pps_kernel.h @@ -52,6 +52,7 @@ struct pps_device { int current_mode; /* PPS mode at event time */ unsigned int last_ev; /* last PPS event id */ + unsigned int last_fetched_ev; /* last fetched PPS event id */ wait_queue_head_t queue; /* PPS event queue */ unsigned int id; /* PPS source unique ID */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index b0af8d4ef6e6..1fad1c8a4c76 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -369,8 +369,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, #endif -#ifdef CONFIG_SMP - /* * Migrate-Disable and why it is undesired. * @@ -429,13 +427,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, extern void migrate_disable(void); extern void migrate_enable(void); -#else - -static inline void migrate_disable(void) { } -static inline void migrate_enable(void) { } - -#endif /* CONFIG_SMP */ - /** * preempt_disable_nested - Disable preemption inside a normally preempt disabled section * diff --git a/include/linux/printk.h b/include/linux/printk.h index 5b462029d03c..5d22b803f51e 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -154,6 +154,8 @@ int vprintk_emit(int facility, int level, asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); +__printf(1, 0) +int vprintk_deferred(const char *fmt, va_list args); asmlinkage __printf(1, 2) __cold int _printk(const char *fmt, ...); @@ -214,6 +216,11 @@ int vprintk(const char *s, va_list args) { return 0; } +static inline __printf(1, 0) +int vprintk_deferred(const char *fmt, va_list args) +{ + return 0; +} static inline __printf(1, 2) __cold int _printk(const char *s, ...) { diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index ea62201c74c4..de1d24f19f76 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -27,6 +27,8 @@ enum { PROC_ENTRY_proc_read_iter = 1U << 1, PROC_ENTRY_proc_compat_ioctl = 1U << 2, + + PROC_ENTRY_FORCE_LOOKUP = 1U << 7, }; struct proc_ops { diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 5ea470eb4d76..4b20375f3783 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -6,6 +6,7 @@ #define _LINUX_PROC_NS_H #include <linux/ns_common.h> +#include <uapi/linux/nsfs.h> struct pid_namespace; struct nsset; @@ -39,13 +40,14 @@ extern const struct proc_ns_operations timens_for_children_operations; * We always define these enumerators */ enum { - PROC_ROOT_INO = 1, - PROC_IPC_INIT_INO = 0xEFFFFFFFU, - PROC_UTS_INIT_INO = 0xEFFFFFFEU, - PROC_USER_INIT_INO = 0xEFFFFFFDU, - PROC_PID_INIT_INO = 0xEFFFFFFCU, - PROC_CGROUP_INIT_INO = 0xEFFFFFFBU, - PROC_TIME_INIT_INO = 0xEFFFFFFAU, + PROC_IPC_INIT_INO = IPC_NS_INIT_INO, + PROC_UTS_INIT_INO = UTS_NS_INIT_INO, + PROC_USER_INIT_INO = USER_NS_INIT_INO, + PROC_PID_INIT_INO = PID_NS_INIT_INO, + PROC_CGROUP_INIT_INO = CGROUP_NS_INIT_INO, + PROC_TIME_INIT_INO = TIME_NS_INIT_INO, + PROC_NET_INIT_INO = NET_NS_INIT_INO, + PROC_MNT_INIT_INO = MNT_NS_INIT_INO, }; #ifdef CONFIG_PROC_FS diff --git a/include/linux/property.h b/include/linux/property.h index f718dd4789e5..82f0cb3abd1e 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -17,6 +17,7 @@ #include <linux/fwnode.h> #include <linux/stddef.h> #include <linux/types.h> +#include <linux/util_macros.h> struct device; @@ -169,7 +170,7 @@ struct fwnode_handle *fwnode_get_next_available_child_node( #define fwnode_for_each_named_child_node(fwnode, child, name) \ fwnode_for_each_child_node(fwnode, 
child) \ - if (!fwnode_name_eq(child, name)) { } else + for_each_if(fwnode_name_eq(child, name)) #define fwnode_for_each_available_child_node(fwnode, child) \ for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ @@ -184,7 +185,7 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev, #define device_for_each_named_child_node(dev, child, name) \ device_for_each_child_node(dev, child) \ - if (!fwnode_name_eq(child, name)) { } else + for_each_if(fwnode_name_eq(child, name)) #define device_for_each_child_node_scoped(dev, child) \ for (struct fwnode_handle *child __free(fwnode_handle) = \ @@ -193,7 +194,7 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev, #define device_for_each_named_child_node_scoped(dev, child, name) \ device_for_each_child_node_scoped(dev, child) \ - if (!fwnode_name_eq(child, name)) { } else + for_each_if(fwnode_name_eq(child, name)) struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, const char *childname); diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index c773eeb92d04..4e5696cfade7 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -6,13 +6,18 @@ #define _LINUX_PSE_CONTROLLER_H #include <linux/list.h> +#include <linux/netlink.h> +#include <linux/kfifo.h> #include <uapi/linux/ethtool.h> +#include <uapi/linux/ethtool_netlink_generated.h> +#include <linux/regulator/driver.h> /* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */ #define MAX_PI_CURRENT 1920000 /* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */ #define MAX_PI_PW 99900 +struct net_device; struct phy_device; struct pse_controller_dev; struct netlink_ext_ack; @@ -38,6 +43,19 @@ struct ethtool_c33_pse_pw_limit_range { }; /** + * struct pse_irq_desc - notification sender description for IRQ based events. + * + * @name: the visible name for the IRQ + * @map_event: driver callback to map IRQ status into PSE devices with events. + */ +struct pse_irq_desc { + const char *name; + int (*map_event)(int irq, struct pse_controller_dev *pcdev, + unsigned long *notifs, + unsigned long *notifs_mask); +}; + +/** * struct pse_control_config - PSE control/channel configuration. * * @podl_admin_control: set PoDL PSE admin control as described in @@ -98,6 +116,7 @@ struct pse_pw_limit_ranges { /** * struct ethtool_pse_control_status - PSE control/channel status. * + * @pw_d_id: PSE power domain index. * @podl_admin_state: operational state of the PoDL PSE * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState * @podl_pw_status: power detection status of the PoDL PSE. @@ -117,8 +136,12 @@ struct pse_pw_limit_ranges { * is in charge of the memory allocation * @c33_pw_limit_nb_ranges: number of supported power limit configuration * ranges + * @prio_max: max priority allowed for the c33_prio variable value. + * @prio: priority of the PSE. Managed by PSE core in case of static budget + * evaluation strategy. 
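Returning to the property.h hunk above, the for_each_if() form keeps the named-child iterators safe to nest inside a caller's own if/else. A small hypothetical example, counting children named "port":

        static unsigned int mydrv_count_ports(struct device *dev)
        {
                struct fwnode_handle *child;
                unsigned int n = 0;

                device_for_each_named_child_node(dev, child, "port")
                        n++;

                return n;
        }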
*/ struct ethtool_pse_control_status { + u32 pw_d_id; enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; enum ethtool_c33_pse_admin_state c33_admin_state; @@ -129,12 +152,20 @@ struct ethtool_pse_control_status { u32 c33_avail_pw_limit; struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; u32 c33_pw_limit_nb_ranges; + u32 prio_max; + u32 prio; }; /** * struct pse_controller_ops - PSE controller driver callbacks * - * @setup_pi_matrix: setup PI matrix of the PSE controller + * @setup_pi_matrix: Setup PI matrix of the PSE controller. + * The PSE PIs devicetree nodes have already been parsed by + * of_load_pse_pis() and the pcdev->pi[x]->pairset[y].np + * populated. This callback should establish the + * relationship between the PSE controller hardware ports + * and the PSE Power Interfaces, either through software + * mapping or hardware configuration. * @pi_get_admin_state: Get the operational state of the PSE PI. This ops * is mandatory. * @pi_get_pw_status: Get the power detection status of the PSE PI. This @@ -152,6 +183,11 @@ struct ethtool_pse_control_status { * range. The driver is in charge of the memory * allocation and should return the number of * ranges. + * @pi_get_prio: Get the PSE PI priority. + * @pi_set_prio: Configure the PSE PI priority. + * @pi_get_pw_req: Get the power requested by a PD before enabling the PSE PI. + * This is only relevant when an interrupt is registered using + * the devm_pse_irq_helper() helper. */ struct pse_controller_ops { int (*setup_pi_matrix)(struct pse_controller_dev *pcdev); @@ -172,6 +208,10 @@ struct pse_controller_ops { int id, int max_mW); int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id, struct pse_pw_limit_ranges *pw_limit_ranges); + int (*pi_get_prio)(struct pse_controller_dev *pcdev, int id); + int (*pi_set_prio)(struct pse_controller_dev *pcdev, int id, + unsigned int prio); + int (*pi_get_pw_req)(struct pse_controller_dev *pcdev, int id); }; struct module; @@ -206,12 +246,35 @@ struct pse_pi_pairset { * @np: device node pointer of the PSE PI node * @rdev: regulator represented by the PSE PI * @admin_state_enabled: PI enabled state + * @pw_d: Power domain of the PSE PI + * @prio: Priority of the PSE PI. Used in static budget evaluation strategy + * @isr_pd_detected: PSE PI detection status managed by the interrupt + * handler. This variable is relevant when the power enabled + * management is handled in software like the static + * budget evaluation strategy. + * @pw_allocated_mW: Power allocated to a PSE PI to manage power budget in + * static budget evaluation strategy.
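To make the interrupt plumbing concrete, a hedged sketch of a PSE driver wiring an IRQ through the new pse_irq_desc and the devm_pse_irq_helper() declared further down in this header; the mypse_* names and the decoding logic are invented:

        static int mypse_map_event(int irq, struct pse_controller_dev *pcdev,
                                   unsigned long *notifs,
                                   unsigned long *notifs_mask)
        {
                /*
                 * Read the hardware status and translate it into per-PI
                 * notification bits for the PSE core; nothing pending here.
                 */
                *notifs = 0;
                *notifs_mask = 0;
                return 0;
        }

        static const struct pse_irq_desc mypse_irq_desc = {
                .name = "mypse",
                .map_event = mypse_map_event,
        };

        static int mypse_setup_irq(struct pse_controller_dev *pcdev, int irq)
        {
                return devm_pse_irq_helper(pcdev, irq, 0, &mypse_irq_desc);
        }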
*/ struct pse_pi { struct pse_pi_pairset pairset[2]; struct device_node *np; struct regulator_dev *rdev; bool admin_state_enabled; + struct pse_power_domain *pw_d; + int prio; + bool isr_pd_detected; + int pw_allocated_mW; +}; + +/** + * struct pse_ntf - PSE notification element + * + * @id: ID of the PSE control + * @notifs: PSE notifications to be reported + */ +struct pse_ntf { + int id; + unsigned long notifs; }; /** @@ -228,6 +291,13 @@ struct pse_pi { * @types: types of the PSE controller * @pi: table of PSE PIs described in this controller device * @no_of_pse_pi: flag set if the pse_pis devicetree node is not used + * @irq: PSE interrupt + * @pis_prio_max: Maximum value allowed for the PSE PIs priority + * @supp_budget_eval_strategies: budget evaluation strategies supported + * by the PSE + * @ntf_work: workqueue for PSE notification management + * @ntf_fifo: PSE notifications FIFO + * @ntf_fifo_lock: protect @ntf_fifo writer */ struct pse_controller_dev { const struct pse_controller_ops *ops; @@ -241,6 +311,30 @@ struct pse_controller_dev { enum ethtool_pse_types types; struct pse_pi *pi; bool no_of_pse_pi; + int irq; + unsigned int pis_prio_max; + u32 supp_budget_eval_strategies; + struct work_struct ntf_work; + DECLARE_KFIFO_PTR(ntf_fifo, struct pse_ntf); + spinlock_t ntf_fifo_lock; /* Protect @ntf_fifo writer */ +}; + +/** + * enum pse_budget_eval_strategies - PSE budget evaluation strategies. + * @PSE_BUDGET_EVAL_STRAT_DISABLED: Budget evaluation strategy disabled. + * @PSE_BUDGET_EVAL_STRAT_STATIC: PSE static budget evaluation strategy. + * Budget evaluation strategy based on the power requested during PD + * classification. This strategy is managed by the PSE core. + * @PSE_BUDGET_EVAL_STRAT_DYNAMIC: PSE dynamic budget evaluation + * strategy. Budget evaluation strategy based on the current consumption + * per ports compared to the total power budget. This mode is managed by + * the PSE controller. 
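A short hypothetical probe fragment showing how a controller might advertise these capabilities, using the new pse_controller_dev fields and the strategy flags defined immediately below:

        static void mypse_init_caps(struct pse_controller_dev *pcdev)
        {
                pcdev->pis_prio_max = 7;        /* eight priority levels */
                pcdev->supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_STATIC;
        }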
+ */ + +enum pse_budget_eval_strategies { + PSE_BUDGET_EVAL_STRAT_DISABLED = 1 << 0, + PSE_BUDGET_EVAL_STRAT_STATIC = 1 << 1, + PSE_BUDGET_EVAL_STRAT_DYNAMIC = 1 << 2, }; #if IS_ENABLED(CONFIG_PSE_CONTROLLER) @@ -249,8 +343,11 @@ void pse_controller_unregister(struct pse_controller_dev *pcdev); struct device; int devm_pse_controller_register(struct device *dev, struct pse_controller_dev *pcdev); +int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq, + int irq_flags, const struct pse_irq_desc *d); -struct pse_control *of_pse_control_get(struct device_node *node); +struct pse_control *of_pse_control_get(struct device_node *node, + struct phy_device *phydev); void pse_control_put(struct pse_control *psec); int pse_ethtool_get_status(struct pse_control *psec, @@ -262,13 +359,17 @@ int pse_ethtool_set_config(struct pse_control *psec, int pse_ethtool_set_pw_limit(struct pse_control *psec, struct netlink_ext_ack *extack, const unsigned int pw_limit); +int pse_ethtool_set_prio(struct pse_control *psec, + struct netlink_ext_ack *extack, + unsigned int prio); bool pse_has_podl(struct pse_control *psec); bool pse_has_c33(struct pse_control *psec); #else -static inline struct pse_control *of_pse_control_get(struct device_node *node) +static inline struct pse_control *of_pse_control_get(struct device_node *node, + struct phy_device *phydev) { return ERR_PTR(-ENOENT); } @@ -298,6 +399,13 @@ static inline int pse_ethtool_set_pw_limit(struct pse_control *psec, return -EOPNOTSUPP; } +static inline int pse_ethtool_set_prio(struct pse_control *psec, + struct netlink_ext_ack *extack, + unsigned int prio) +{ + return -EOPNOTSUPP; +} + static inline bool pse_has_podl(struct pse_control *psec) { return false; diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index f1fd3a8044e0..dd10c22299ab 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -84,11 +84,9 @@ enum psi_aggregators { struct psi_group_cpu { /* 1st cacheline updated by the scheduler */ - /* Aggregator needs to know of concurrent changes */ - seqcount_t seq ____cacheline_aligned_in_smp; - /* States of the tasks belonging to this group */ - unsigned int tasks[NR_PSI_TASK_COUNTS]; + unsigned int tasks[NR_PSI_TASK_COUNTS] + ____cacheline_aligned_in_smp; /* Aggregate pressure state derived from the tasks */ u32 state_mask; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index eced7e9bf69a..3d089bd4d5e9 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -477,40 +477,14 @@ static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) { - if (sts) { - switch (sts->clockid) { - case CLOCK_REALTIME: - ktime_get_real_ts64(&sts->pre_ts); - break; - case CLOCK_MONOTONIC: - ktime_get_ts64(&sts->pre_ts); - break; - case CLOCK_MONOTONIC_RAW: - ktime_get_raw_ts64(&sts->pre_ts); - break; - default: - break; - } - } + if (sts) + ktime_get_clock_ts64(sts->clockid, &sts->pre_ts); } static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) { - if (sts) { - switch (sts->clockid) { - case CLOCK_REALTIME: - ktime_get_real_ts64(&sts->post_ts); - break; - case CLOCK_MONOTONIC: - ktime_get_ts64(&sts->post_ts); - break; - case CLOCK_MONOTONIC_RAW: - ktime_get_raw_ts64(&sts->post_ts); - break; - default: - break; - } - } + if (sts) + ktime_get_clock_ts64(sts->clockid, &sts->post_ts); } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 
63a17d2b4ec8..8cafc483db53 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -2,6 +2,7 @@ #ifndef __LINUX_PWM_H #define __LINUX_PWM_H +#include <linux/cdev.h> #include <linux/device.h> #include <linux/err.h> #include <linux/module.h> @@ -273,6 +274,8 @@ struct pwm_capture { unsigned int duty_cycle; }; +#define PWM_WFHWSIZE 20 + /** * struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM @@ -311,6 +314,7 @@ struct pwm_ops { /** * struct pwm_chip - abstract a PWM controller * @dev: device providing the PWMs + * @cdev: &struct cdev for this device * @ops: callbacks for this PWM controller * @owner: module providing this chip * @id: unique number of this PWM chip @@ -325,6 +329,7 @@ struct pwm_ops { */ struct pwm_chip { struct device dev; + struct cdev cdev; const struct pwm_ops *ops; struct module *owner; unsigned int id; diff --git a/include/linux/pwrseq/provider.h b/include/linux/pwrseq/provider.h index cbc3607cbfcf..33b3d2c2e39d 100644 --- a/include/linux/pwrseq/provider.h +++ b/include/linux/pwrseq/provider.h @@ -13,6 +13,9 @@ struct pwrseq_device; typedef int (*pwrseq_power_state_func)(struct pwrseq_device *); typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *); +#define PWRSEQ_NO_MATCH 0 +#define PWRSEQ_MATCH_OK 1 + /** * struct pwrseq_unit_data - Configuration of a single power sequencing * unit. diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 06cc8888199e..c334f82ed385 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -19,7 +19,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) return &sb->s_dquot; } -/* i_mutex must being held */ +/* i_rwsem must be held */ static inline bool is_quota_modification(struct mnt_idmap *idmap, struct inode *inode, struct iattr *ia) { diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h index 8eac4f3d5254..d10563afd91c 100644 --- a/include/linux/ref_tracker.h +++ b/include/linux/ref_tracker.h @@ -6,6 +6,8 @@ #include <linux/spinlock.h> #include <linux/stackdepot.h> +#define __ostream_printf __printf(2, 3) + struct ref_tracker; struct ref_tracker_dir { @@ -17,15 +19,45 @@ struct ref_tracker_dir { bool dead; struct list_head list; /* List of active trackers */ struct list_head quarantine; /* List of dead trackers */ - char name[32]; + const char *class; /* object classname */ #endif }; #ifdef CONFIG_REF_TRACKER +#ifdef CONFIG_DEBUG_FS + +void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir); +void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...); + +#else /* CONFIG_DEBUG_FS */ + +static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir) +{ +} + +static inline __ostream_printf +void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +/** + * ref_tracker_dir_init - initialize a ref_tracker dir + * @dir: ref_tracker_dir to be initialized + * @quarantine_count: max number of entries to be tracked + * @class: pointer to static string that describes object type + * + * Initialize a ref_tracker_dir. If debugfs is configured, then a file + * will also be created for it under the top-level ref_tracker debugfs + * directory. + * + * Note that @class must point to a static string.
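A minimal sketch of the reworked ref_tracker initialisation, assuming a driver-local tracker directory; the "mydrv" class string and symlink format are illustrative:

        static struct ref_tracker_dir mydrv_refs;

        static void mydrv_init_tracking(int id)
        {
                /* @class must be a static string; it is not copied. */
                ref_tracker_dir_init(&mydrv_refs, 16, "mydrv");

                /* Optional human-readable alias under debugfs. */
                ref_tracker_dir_symlink(&mydrv_refs, "mydrv-%d", id);
        }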
+ */ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, unsigned int quarantine_count, - const char *name) + const char *class) { INIT_LIST_HEAD(&dir->list); INIT_LIST_HEAD(&dir->quarantine); @@ -34,7 +66,8 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, dir->dead = false; refcount_set(&dir->untracked, 1); refcount_set(&dir->no_tracker, 1); - strscpy(dir->name, name, sizeof(dir->name)); + dir->class = class; + ref_tracker_dir_debugfs(dir); stack_depot_init(); } @@ -58,7 +91,16 @@ int ref_tracker_free(struct ref_tracker_dir *dir, static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, unsigned int quarantine_count, - const char *name) + const char *class) +{ +} + +static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir) +{ +} + +static inline __ostream_printf +void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...) { } diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 02b83f5499b8..4e1ac1fbcec4 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -913,7 +913,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. + * a struct regmap. Implies 'fast_io'. */ #define regmap_init_mmio_clk(dev, clk_id, regs, config) \ __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \ @@ -927,7 +927,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. + * a struct regmap. Implies 'fast_io'. */ #define regmap_init_mmio(dev, regs, config) \ regmap_init_mmio_clk(dev, NULL, regs, config) @@ -1138,7 +1138,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); * * The return value will be an ERR_PTR() on error or a valid pointer * to a struct regmap. The regmap will be automatically freed by the - * device management code. + * device management code. Implies 'fast_io'. */ #define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \ __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \ @@ -1153,7 +1153,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); * * The return value will be an ERR_PTR() on error or a valid pointer * to a struct regmap. The regmap will be automatically freed by the - * device management code. + * device management code. Implies 'fast_io'. */ #define devm_regmap_init_mmio(dev, regs, config) \ devm_regmap_init_mmio_clk(dev, NULL, regs, config) diff --git a/include/linux/regset.h b/include/linux/regset.h index 9061266dd8de..ad1ca6fe04f4 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -151,7 +151,8 @@ typedef int user_regset_writeback_fn(struct task_struct *target, * @align: Required alignment, in bytes. * @bias: Bias from natural indexing. * @core_note_type: ELF note @n_type value used in core dumps. - * @get: Function to fetch values. + * @core_note_name: ELF note name to qualify the note type. + * @regset_get: Function to fetch values. * @set: Function to store values. * @active: Function to report if regset is active, or %NULL. * @writeback: Function to write data back to user memory, or %NULL. 
@@ -190,6 +191,10 @@ typedef int user_regset_writeback_fn(struct task_struct *target, * * If nonzero, @core_note_type gives the n_type field (NT_* value) * of the core file note in which this regset's data appears. + * @core_note_name specifies the note name. The preferred way to + * specify these two fields is to use the @USER_REGSET_NOTE_TYPE() + * macro. + * * NT_PRSTATUS is a special case in that the regset data starts at * offsetof(struct elf_prstatus, pr_reg) into the note data; that is * part of the per-machine ELF formats userland knows about. In @@ -207,8 +212,13 @@ struct user_regset { unsigned int align; unsigned int bias; unsigned int core_note_type; + const char *core_note_name; }; +#define USER_REGSET_NOTE_TYPE(type) \ + .core_note_type = (NT_ ## type), \ + .core_note_name = (NN_ ## type) + /** * struct user_regset_view - available regsets * @name: Identifier, e.g. UTS_MACHINE string. diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h index 73291f280a23..5e314a4294fb 100644 --- a/include/linux/regulator/coupler.h +++ b/include/linux/regulator/coupler.h @@ -8,7 +8,8 @@ #ifndef __LINUX_REGULATOR_COUPLER_H_ #define __LINUX_REGULATOR_COUPLER_H_ -#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> #include <linux/suspend.h> struct regulator_coupler; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index cd7f0ae26615..bc90c3c7b5fd 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -152,9 +152,7 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); struct ring_buffer_iter * -ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags); -void ring_buffer_read_prepare_sync(void); -void ring_buffer_read_start(struct ring_buffer_iter *iter); +ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags); void ring_buffer_read_finish(struct ring_buffer_iter *iter); struct ring_buffer_event * diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 7d049883a08a..fa9f1021541e 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -18,7 +18,7 @@ #include <linux/rbtree_types.h> #include <linux/spinlock_types_raw.h> -extern int max_lock_depth; /* for sysctl */ +extern int max_lock_depth; struct rt_mutex_base { raw_spinlock_t wait_lock; diff --git a/include/linux/rv.h b/include/linux/rv.h index 3452b5e4b29e..14410a42faef 100644 --- a/include/linux/rv.h +++ b/include/linux/rv.h @@ -7,9 +7,17 @@ #ifndef _LINUX_RV_H #define _LINUX_RV_H -#define MAX_DA_NAME_LEN 32 +#include <linux/types.h> +#include <linux/list.h> + +#define MAX_DA_NAME_LEN 32 +#define MAX_DA_RETRY_RACING_EVENTS 3 #ifdef CONFIG_RV +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/array_size.h> + /* * Deterministic automaton per-object variables. */ @@ -18,27 +26,72 @@ struct da_monitor { unsigned int curr_state; }; +#ifdef CONFIG_RV_LTL_MONITOR + /* - * Per-task RV monitors count. Nowadays fixed in RV_PER_TASK_MONITORS. - * If we find justification for more monitors, we can think about - * adding more or developing a dynamic method. So far, none of - * these are justified. + * In the future, if the number of atomic propositions or the size of Buchi + * automaton is larger, we can switch to dynamic allocation. For now, the code + * is simpler this way. 
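Returning to the regset.h hunk above, a hedged sketch of a regset table entry using the new macro, which pairs the NT_* note type with its matching NN_* note name; ELF_NGREG and elf_greg_t come from the arch ELF headers, and the surrounding fields are illustrative:

        static const struct user_regset example_regsets[] = {
                {
                        USER_REGSET_NOTE_TYPE(PRSTATUS),
                        .n = ELF_NGREG,
                        .size = sizeof(elf_greg_t),
                        .align = sizeof(elf_greg_t),
                        /* .regset_get and .set wired up as before */
                },
        };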
*/ -#define RV_PER_TASK_MONITORS 1 -#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS) +#define RV_MAX_LTL_ATOM 32 +#define RV_MAX_BA_STATES 32 -/* - * Futher monitor types are expected, so make this a union. +/** + * struct ltl_monitor - A linear temporal logic runtime verification monitor + * @states: States in the Buchi automaton. As Buchi automaton is a + * non-deterministic state machine, the monitor can be in multiple + * states simultaneously. This is a bitmask of all possible states. + * If this is zero, that means either: + * - The monitor has not started yet (e.g. because not all + * atomic propositions are known). + * - There is no possible state to be in. In other words, a + * violation of the LTL property is detected. + * @atoms: The values of atomic propositions. + * @unknown_atoms: Atomic propositions which are still unknown. */ +struct ltl_monitor { + DECLARE_BITMAP(states, RV_MAX_BA_STATES); + DECLARE_BITMAP(atoms, RV_MAX_LTL_ATOM); + DECLARE_BITMAP(unknown_atoms, RV_MAX_LTL_ATOM); +}; + +static inline bool rv_ltl_valid_state(struct ltl_monitor *mon) +{ + for (int i = 0; i < ARRAY_SIZE(mon->states); ++i) { + if (mon->states[i]) + return true; + } + return false; +} + +static inline bool rv_ltl_all_atoms_known(struct ltl_monitor *mon) +{ + for (int i = 0; i < ARRAY_SIZE(mon->unknown_atoms); ++i) { + if (mon->unknown_atoms[i]) + return false; + } + return true; +} + +#else + +struct ltl_monitor {}; + +#endif /* CONFIG_RV_LTL_MONITOR */ + +#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS) + union rv_task_monitor { - struct da_monitor da_mon; + struct da_monitor da_mon; + struct ltl_monitor ltl_mon; }; #ifdef CONFIG_RV_REACTORS struct rv_reactor { const char *name; const char *description; - void (*react)(char *msg); + __printf(1, 2) void (*react)(const char *msg, ...); + struct list_head list; }; #endif @@ -50,8 +103,12 @@ struct rv_monitor { void (*disable)(void); void (*reset)(void); #ifdef CONFIG_RV_REACTORS - void (*react)(char *msg); + struct rv_reactor *reactor; + __printf(1, 2) void (*react)(const char *msg, ...); #endif + struct list_head list; + struct rv_monitor *parent; + struct dentry *root_d; }; bool rv_monitoring_on(void); @@ -64,6 +121,11 @@ void rv_put_task_monitor_slot(int slot); bool rv_reacting_on(void); int rv_unregister_reactor(struct rv_reactor *reactor); int rv_register_reactor(struct rv_reactor *reactor); +#else +static inline bool rv_reacting_on(void) +{ + return false; +} #endif /* CONFIG_RV_REACTORS */ #endif /* CONFIG_RV */ diff --git a/include/linux/sched.h b/include/linux/sched.h index aa9c5be7a632..a9693f179c58 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -34,6 +34,7 @@ #include <linux/sched/prio.h> #include <linux/sched/types.h> #include <linux/signal_types.h> +#include <linux/spinlock.h> #include <linux/syscall_user_dispatch_types.h> #include <linux/mm_types_task.h> #include <linux/netdevice_xmit.h> @@ -339,9 +340,11 @@ extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); extern void io_schedule(void); -/* wrapper function to trace from this header file */ +/* wrapper functions to trace from this header file */ DECLARE_TRACEPOINT(sched_set_state_tp); extern void __trace_set_current_state(int state_value); +DECLARE_TRACEPOINT(sched_set_need_resched_tp); +extern void __trace_set_need_resched(struct task_struct *curr, int tif); /** * struct prev_cputime - snapshot of system and user cputime @@ -395,15 +398,10 @@ enum uclamp_id { UCLAMP_CNT }; -#ifdef CONFIG_SMP extern 
struct root_domain def_root_domain; extern struct mutex sched_domains_mutex; extern void sched_domains_mutex_lock(void); extern void sched_domains_mutex_unlock(void); -#else -static inline void sched_domains_mutex_lock(void) { } -static inline void sched_domains_mutex_unlock(void) { } -#endif struct sched_param { int sched_priority; @@ -584,7 +582,15 @@ struct sched_entity { u64 sum_exec_runtime; u64 prev_sum_exec_runtime; u64 vruntime; - s64 vlag; + union { + /* + * When !@on_rq this field is vlag. + * When cfs_rq->curr == se (which implies @on_rq) + * this field is vprot. See protect_slice(). + */ + s64 vlag; + u64 vprot; + }; u64 slice; u64 nr_migrations; @@ -600,7 +606,6 @@ struct sched_entity { unsigned long runnable_weight; #endif -#ifdef CONFIG_SMP /* * Per entity load average tracking. * @@ -608,7 +613,6 @@ struct sched_entity { * collide with read-mostly values above. */ struct sched_avg avg; -#endif }; struct sched_rt_entity { @@ -701,6 +705,7 @@ struct sched_dl_entity { unsigned int dl_defer : 1; unsigned int dl_defer_armed : 1; unsigned int dl_defer_running : 1; + unsigned int dl_server_idle : 1; /* * Bandwidth enforcement timer. Each -deadline task has its @@ -838,7 +843,6 @@ struct task_struct { struct alloc_tag *alloc_tag; #endif -#ifdef CONFIG_SMP int on_cpu; struct __call_single_node wake_entry; unsigned int wakee_flips; @@ -854,7 +858,6 @@ struct task_struct { */ int recent_used_cpu; int wake_cpu; -#endif int on_rq; int prio; @@ -913,9 +916,7 @@ struct task_struct { cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; -#ifdef CONFIG_SMP unsigned short migration_disabled; -#endif unsigned short migration_flags; #ifdef CONFIG_PREEMPT_RCU @@ -947,10 +948,8 @@ struct task_struct { struct sched_info sched_info; struct list_head tasks; -#ifdef CONFIG_SMP struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; -#endif struct mm_struct *mm; struct mm_struct *active_mm; @@ -1234,10 +1233,7 @@ struct task_struct { struct rt_mutex_waiter *pi_blocked_on; #endif -#ifdef CONFIG_DEBUG_MUTEXES - /* Mutex deadlock detection: */ - struct mutex_waiter *blocked_on; -#endif + struct mutex *blocked_on; /* lock we're blocked on */ #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER /* @@ -1603,8 +1599,10 @@ struct task_struct { /* Used by BPF for per-TASK xdp storage */ struct bpf_net_context *bpf_net_context; -#ifdef CONFIG_GCC_PLUGIN_STACKLEAK +#ifdef CONFIG_KSTACK_ERASE unsigned long lowest_stack; +#endif +#ifdef CONFIG_KSTACK_ERASE_METRICS unsigned long prev_lowest_stack; #endif @@ -1638,12 +1636,10 @@ struct task_struct { #ifdef CONFIG_RV /* - * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. - * If we find justification for more monitors, we can think - * about adding more or developing a dynamic method. So far, - * none of these are justified. + * Per-task RV monitor, fixed in CONFIG_RV_PER_TASK_MONITORS. + * If memory becomes a concern, we can think about a dynamic method. 
*/ - union rv_task_monitor rv[RV_PER_TASK_MONITORS]; + union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS]; #endif #ifdef CONFIG_USER_EVENTS @@ -1660,6 +1656,19 @@ struct task_struct { randomized_struct_fields_end } __attribute__ ((aligned (64))); +#ifdef CONFIG_SCHED_PROXY_EXEC +DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec); +static inline bool sched_proxy_exec(void) +{ + return static_branch_likely(&__sched_proxy_exec); +} +#else +static inline bool sched_proxy_exec(void) +{ + return false; +} +#endif + #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) @@ -1774,12 +1783,8 @@ extern struct pid *cad_pid; static __always_inline bool is_percpu_thread(void) { -#ifdef CONFIG_SMP return (current->flags & PF_NO_SETAFFINITY) && (current->nr_cpus_allowed == 1); -#else - return true; -#endif } /* Per-process atomic flags. */ @@ -1844,7 +1849,6 @@ extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpu extern int task_can_attach(struct task_struct *p); extern int dl_bw_alloc(int cpu, u64 dl_bw); extern void dl_bw_free(int cpu, u64 dl_bw); -#ifdef CONFIG_SMP /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */ extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); @@ -1862,33 +1866,6 @@ extern void release_user_cpus_ptr(struct task_struct *p); extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); -#else -static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) -{ -} -static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) -{ - /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */ - if ((*cpumask_bits(new_mask) & 1) == 0) - return -EINVAL; - return 0; -} -static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) -{ - if (src->user_cpus_ptr) - return -EINVAL; - return 0; -} -static inline void release_user_cpus_ptr(struct task_struct *p) -{ - WARN_ON(p->user_cpus_ptr); -} - -static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) -{ - return 0; -} -#endif extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); @@ -1977,11 +1954,7 @@ extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); -#ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); -#else -static inline void kick_process(struct task_struct *tsk) { } -#endif extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); #define set_task_comm(tsk, from) ({ \ @@ -2008,7 +1981,6 @@ extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec buf; \ }) -#ifdef CONFIG_SMP static __always_inline void scheduler_ipi(void) { /* @@ -2018,9 +1990,6 @@ static __always_inline void scheduler_ipi(void) */ preempt_fold_need_resched(); } -#else -static inline void scheduler_ipi(void) { } -#endif extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); @@ -2061,6 +2030,9 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) static inline void set_tsk_need_resched(struct task_struct *tsk) { + if 
(tracepoint_enabled(sched_set_need_resched_tp) && + !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED)) + __trace_set_need_resched(tsk, TIF_NEED_RESCHED); set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); } @@ -2163,6 +2135,67 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock); __cond_resched_rwlock_write(lock); \ }) +#ifndef CONFIG_PREEMPT_RT +static inline struct mutex *__get_task_blocked_on(struct task_struct *p) +{ + struct mutex *m = p->blocked_on; + + if (m) + lockdep_assert_held_once(&m->wait_lock); + return m; +} + +static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m) +{ + WARN_ON_ONCE(!m); + /* The task should only be setting itself as blocked */ + WARN_ON_ONCE(p != current); + /* Currently we serialize blocked_on under the mutex::wait_lock */ + lockdep_assert_held_once(&m->wait_lock); + /* + * Check to ensure we don't overwrite an existing mutex value + * with a different mutex. Note, setting it to the same + * lock repeatedly is ok. + */ + WARN_ON_ONCE(p->blocked_on && p->blocked_on != m); + p->blocked_on = m; +} + +static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m) +{ + guard(raw_spinlock_irqsave)(&m->wait_lock); + __set_task_blocked_on(p, m); +} + +static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m) +{ + WARN_ON_ONCE(!m); + /* Currently we serialize blocked_on under the mutex::wait_lock */ + lockdep_assert_held_once(&m->wait_lock); + /* + * There may be cases where we re-clear already cleared + * blocked_on relationships, but make sure we are not + * clearing the relationship with a different lock. + */ + WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m); + p->blocked_on = NULL; +} + +static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m) +{ + guard(raw_spinlock_irqsave)(&m->wait_lock); + __clear_task_blocked_on(p, m); +} +#else +static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m) +{ +} + +static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m) +{ +} +#endif /* !CONFIG_PREEMPT_RT */ + static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); @@ -2202,8 +2235,6 @@ extern bool sched_task_on_rq(struct task_struct *p); extern unsigned long get_wchan(struct task_struct *p); extern struct task_struct *cpu_curr_snapshot(int cpu); -#include <linux/spinlock.h> - /* * In order to reduce various lock holder preemption latencies provide an * interface to see if a vCPU is currently running or not.
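To make the intended pairing of the new blocked_on accessors concrete, here is a minimal sketch (not the actual mutex slowpath; example_mutex_wait() and its wakeup path are hypothetical): the waiter records the mutex it is about to sleep on and clears the record once it has been woken as owner, both updates being serialized by mutex::wait_lock as the helpers require.

static void example_mutex_wait(struct mutex *m)
{
	/* Record that current is blocked on m; takes m->wait_lock internally. */
	set_task_blocked_on(current, m);

	/* ... enqueue current on m's wait list and sleep until woken ... */

	/* Woken as the new owner: drop the record, again under m->wait_lock. */
	clear_task_blocked_on(current, m);
}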
@@ -2226,7 +2257,6 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask); #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_SMP static inline bool owner_on_cpu(struct task_struct *owner) { /* @@ -2238,7 +2268,6 @@ static inline bool owner_on_cpu(struct task_struct *owner) /* Returns effective CPU energy utilization, as seen by the scheduler */ unsigned long sched_cpu_util(int cpu); -#endif /* CONFIG_SMP */ #ifdef CONFIG_SCHED_CORE extern void sched_core_free(struct task_struct *tsk); diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index f9aabbc9d22e..c40115d4e34d 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -29,15 +29,11 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } -#ifdef CONFIG_SMP - struct root_domain; extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); extern void dl_clear_root_domain_cpu(int cpu); -#endif /* CONFIG_SMP */ - extern u64 dl_cookie; extern bool dl_bw_visited(int cpu, u64 cookie); diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h index 439f6029d3b9..8465ff1f20d1 100644 --- a/include/linux/sched/idle.h +++ b/include/linux/sched/idle.h @@ -11,11 +11,7 @@ enum cpu_idle_type { CPU_MAX_IDLE_TYPES }; -#ifdef CONFIG_SMP extern void wake_up_if_idle(int cpu); -#else -static inline void wake_up_if_idle(int cpu) { } -#endif /* * Idle thread specific functions to determine the need_resched diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index b13474825130..2201da0afecc 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm) /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); -#ifdef CONFIG_MMU +#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH) /* same as above but performs the slow path from the async context. Can * be called from the atomic context as well */ diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index 6d67e9a5af6b..0db7f67935fe 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -6,7 +6,7 @@ * This is the interface between the scheduler and nohz/dynticks: */ -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +#ifdef CONFIG_NO_HZ_COMMON extern void nohz_balance_enter_idle(int cpu); extern int get_nohz_timer_target(void); #else @@ -23,7 +23,7 @@ static inline void calc_load_nohz_remote(struct rq *rq) { } static inline void calc_load_nohz_stop(void) { } #endif /* CONFIG_NO_HZ_COMMON */ -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +#ifdef CONFIG_NO_HZ_COMMON extern void wake_up_nohz_cpu(int cpu); #else static inline void wake_up_nohz_cpu(int cpu) { } diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index b04a5d04dee9..42839cfa2778 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -154,14 +154,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS) SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS) /* - * sched_groups of this level overlap - * - * SHARED_PARENT: Set for all NUMA levels above NODE. - * NEEDS_GROUPS: Overlaps can only exist with more than one group. - */ -SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) - -/* * Cross-node balancing * * SHARED_PARENT: Set for all NUMA levels above NODE. 
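A recurring pattern in the scheduler hunks above is the removal of CONFIG_SMP guards and their !SMP stub variants, so interfaces such as owner_on_cpu() and sched_cpu_util() are now declared unconditionally. As a hedged sketch of the optimistic-spin pattern that owner_on_cpu() serves (the function below is hypothetical, not from these headers):

static bool example_spin_on_owner(struct task_struct *owner)
{
	/* Spin only while the lock owner is actually running on a CPU. */
	while (owner_on_cpu(owner)) {
		if (need_resched())
			return false;	/* stop spinning; sleep instead */
		cpu_relax();
	}
	return true;
}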
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ca1db4b92c32..ea41795a352b 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -109,11 +109,7 @@ int kernel_wait(pid_t pid, int *stat); extern void free_task(struct task_struct *tsk); /* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP extern void sched_exec(void); -#else -#define sched_exec() {} -#endif static inline struct task_struct *get_task_struct(struct task_struct *t) { @@ -135,24 +131,17 @@ static inline void put_task_struct(struct task_struct *t) return; /* - * In !RT, it is always safe to call __put_task_struct(). - * Under RT, we can only call it in preemptible context. - */ - if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { - static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP); - - lock_map_acquire_try(&put_task_map); - __put_task_struct(t); - lock_map_release(&put_task_map); - return; - } - - /* - * under PREEMPT_RT, we can't call put_task_struct + * Under PREEMPT_RT, we can't call __put_task_struct * in atomic context because it will indirectly - * acquire sleeping locks. + * acquire sleeping locks. The same is true if the + * current process has a mutex enqueued (blocked on + * a PI chain). + * + * In !RT, it is always safe to call __put_task_struct(). + * However, in order to simplify the code, we resort to the + * deferred call too. * - * call_rcu() will schedule delayed_put_task_struct_rcu() + * call_rcu() will schedule __put_task_struct_rcu_cb() * to be called in process context. * * __put_task_struct() is called when @@ -165,7 +154,7 @@ static inline void put_task_struct(struct task_struct *t) * * delayed_free_task() also uses ->rcu, but it is only called * when it fails to fork a process. Therefore, there is no - * way it can conflict with put_task_struct(). + * way it can conflict with __put_task_struct(). */ call_rcu(&t->rcu, __put_task_struct_rcu_cb); } diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index 85c5a6392e02..1fab7e9043a3 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -53,7 +53,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct * When the stack grows up, this is the highest address. * Beyond that position, we corrupt data on the next page.
*/ -static inline unsigned long *end_of_stack(struct task_struct *p) +static inline unsigned long *end_of_stack(const struct task_struct *p) { #ifdef CONFIG_STACK_GROWSUP return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 198bb5cc1774..5263746b63e8 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -9,7 +9,6 @@ /* * sched-domains (multiprocessor balancing) declarations: */ -#ifdef CONFIG_SMP /* Generate SD flag indexes */ #define SD_FLAG(name, mflags) __##name, @@ -176,8 +175,6 @@ bool cpus_share_resources(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); typedef int (*sched_domain_flags_f)(void); -#define SDTL_OVERLAP 0x01 - struct sd_data { struct sched_domain *__percpu *sd; struct sched_domain_shared *__percpu *sds; @@ -188,7 +185,6 @@ struct sd_data { struct sched_domain_topology_level { sched_domain_mask_f mask; sched_domain_flags_f sd_flags; - int flags; int numa_level; struct sd_data data; char *name; @@ -197,39 +193,8 @@ struct sched_domain_topology_level { extern void __init set_sched_topology(struct sched_domain_topology_level *tl); extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio); - -# define SD_INIT_NAME(type) .name = #type - -#else /* CONFIG_SMP */ - -struct sched_domain_attr; - -static inline void -partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ -} - -static inline bool cpus_equal_capacity(int this_cpu, int that_cpu) -{ - return true; -} - -static inline bool cpus_share_cache(int this_cpu, int that_cpu) -{ - return true; -} - -static inline bool cpus_share_resources(int this_cpu, int that_cpu) -{ - return true; -} - -static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) -{ -} - -#endif /* !CONFIG_SMP */ +#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \ + { .mask = maskfn, .sd_flags = flagsfn, .name = #dname }) #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) extern void rebuild_sched_domains_energy(void); diff --git a/include/linux/security.h b/include/linux/security.h index dba349629229..521bcb5b9717 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -193,8 +193,6 @@ int cap_inode_getsecurity(struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc); extern int cap_mmap_addr(unsigned long addr); -extern int cap_mmap_file(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags); extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -451,6 +449,10 @@ int security_inode_listxattr(struct dentry *dentry); int security_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name); void security_inode_post_removexattr(struct dentry *dentry, const char *name); +int security_inode_file_setattr(struct dentry *dentry, + struct file_kattr *fa); +int security_inode_file_getattr(struct dentry *dentry, + struct file_kattr *fa); int security_inode_need_killpriv(struct dentry *dentry); int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry); int security_inode_getsecurity(struct mnt_idmap *idmap, @@ -1052,6 +1054,18 @@ static inline void 
security_inode_post_removexattr(struct dentry *dentry, const char *name) { } +static inline int security_inode_file_setattr(struct dentry *dentry, + struct file_kattr *fa) +{ + return 0; +} + +static inline int security_inode_file_getattr(struct dentry *dentry, + struct file_kattr *fa) +{ + return 0; +} + static inline int security_inode_need_killpriv(struct dentry *dentry) { return cap_inode_need_killpriv(dentry); @@ -2211,7 +2225,6 @@ struct dentry *securityfs_create_symlink(const char *name, const char *target, const struct inode_operations *iops); extern void securityfs_remove(struct dentry *dentry); -extern void securityfs_recursive_remove(struct dentry *dentry); #else /* CONFIG_SECURITYFS */ @@ -2243,6 +2256,8 @@ static inline void securityfs_remove(struct dentry *dentry) #endif +#define securityfs_recursive_remove securityfs_remove + #ifdef CONFIG_BPF_SYSCALL union bpf_attr; struct bpf_map; diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 144de7a7948d..01efdce0fda0 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -46,8 +46,8 @@ struct plat_serial8250_port { unsigned int type; /* If UPF_FIXED_TYPE */ upf_t flags; /* UPF_* flags */ u16 bugs; /* port bugs */ - unsigned int (*serial_in)(struct uart_port *, int); - void (*serial_out)(struct uart_port *, int, int); + u32 (*serial_in)(struct uart_port *, unsigned int offset); + void (*serial_out)(struct uart_port *, unsigned int offset, u32 val); u32 (*dl_read)(struct uart_8250_port *up); void (*dl_write)(struct uart_8250_port *up, u32 value); void (*set_termios)(struct uart_port *, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 914b5e97e056..84b4648ead7e 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -443,8 +443,8 @@ struct uart_port { spinlock_t lock; /* port lock */ unsigned long iobase; /* in/out[bwl] */ unsigned char __iomem *membase; /* read/write[bwl] */ - unsigned int (*serial_in)(struct uart_port *, int); - void (*serial_out)(struct uart_port *, int, int); + u32 (*serial_in)(struct uart_port *, unsigned int offset); + void (*serial_out)(struct uart_port *, unsigned int offset, u32 val); void (*set_termios)(struct uart_port *, struct ktermios *new, const struct ktermios *old); @@ -1101,8 +1101,6 @@ static inline bool uart_console_registered(struct uart_port *port) return uart_console(port) && console_is_registered(port->cons); } -struct uart_port *uart_get_console(struct uart_port *ports, int nr, - struct console *c); int uart_parse_earlycon(char *p, enum uart_iotype *iotype, resource_size_t *addr, char **options); void uart_parse_options(const char *options, int *baud, int *parity, int *bits, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5520524c93bf..b8b06e71b73e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3665,7 +3665,13 @@ static inline void *skb_frag_address(const skb_frag_t *frag) */ static inline void *skb_frag_address_safe(const skb_frag_t *frag) { - void *ptr = page_address(skb_frag_page(frag)); + struct page *page = skb_frag_page(frag); + void *ptr; + + if (!page) + return NULL; + + ptr = page_address(page); if (unlikely(!ptr)) return NULL; @@ -3873,20 +3879,26 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i) __must_check; -static inline bool skb_can_coalesce(struct sk_buff *skb, int i, - const struct page *page, int off) +static 
inline bool skb_can_coalesce_netmem(struct sk_buff *skb, int i, + netmem_ref netmem, int off) { if (skb_zcopy(skb)) return false; if (i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - return page == skb_frag_page(frag) && + return netmem == skb_frag_netmem(frag) && off == skb_frag_off(frag) + skb_frag_size(frag); } return false; } +static inline bool skb_can_coalesce(struct sk_buff *skb, int i, + const struct page *page, int off) +{ + return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off); +} + static inline int __skb_linearize(struct sk_buff *skb) { return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; @@ -5253,7 +5265,7 @@ static inline void skb_mark_for_recycle(struct sk_buff *skb) } ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, - ssize_t maxsize, gfp_t gfp); + ssize_t maxsize); #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index f1aa0952e8c3..18e9c918325e 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -221,7 +221,7 @@ static inline void wake_up_all_idle_cpus(void) { } #ifdef CONFIG_UP_LATE_INIT extern void __init up_late_init(void); -static inline void smp_init(void) { up_late_init(); } +static __always_inline void smp_init(void) { up_late_init(); } #else static inline void smp_init(void) { } #endif @@ -234,7 +234,7 @@ static inline int get_boot_cpu_id(void) #endif /* !SMP */ /** - * raw_processor_id() - get the current (unstable) CPU id + * raw_smp_processor_id() - get the current (unstable) CPU id * * For then you know what you are doing and need an unstable * CPU id. diff --git a/include/linux/soc/marvell/silicons.h b/include/linux/soc/marvell/silicons.h new file mode 100644 index 000000000000..66bb9bfaf17d --- /dev/null +++ b/include/linux/soc/marvell/silicons.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright (C) 2024 Marvell. + */ + +#ifndef __SOC_SILICON_H +#define __SOC_SILICON_H + +#include <linux/types.h> +#include <linux/pci.h> + +#if defined(CONFIG_ARM64) + +#define CN20K_CHIPID 0x20 +/* + * Silicon check for CN20K family + */ +static inline bool is_cn20k(struct pci_dev *pdev) +{ + return (pdev->subsystem_device & 0xFF) == CN20K_CHIPID; +} +#else +#define is_cn20k(pdev) ((void)(pdev), 0) +#endif + +#endif /* __SOC_SILICON_H */ diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index 469e02d2aa0d..291cdc7ef49c 100644 --- a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -24,9 +24,9 @@ struct socket; */ struct qmi_header { u8 type; - u16 txn_id; - u16 msg_id; - u16 msg_len; + __le16 txn_id; + __le16 msg_id; + __le16 msg_len; } __packed; #define QMI_REQUEST 0 diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h new file mode 100644 index 000000000000..1ed8b1b16bc9 --- /dev/null +++ b/include/linux/soc/qcom/ubwc.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#ifndef __QCOM_UBWC_H__ +#define __QCOM_UBWC_H__ + +#include <linux/bits.h> +#include <linux/types.h> + +struct qcom_ubwc_cfg_data { + u32 ubwc_enc_version; + /* Can be read from MDSS_BASE + 0x58 */ + u32 ubwc_dec_version; + + /** + * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling. + * + * UBWC 1.0 always enables all three levels. + * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3. 
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3. + */ + u32 ubwc_swizzle; +#define UBWC_SWIZZLE_ENABLE_LVL1 BIT(0) +#define UBWC_SWIZZLE_ENABLE_LVL2 BIT(1) +#define UBWC_SWIZZLE_ENABLE_LVL3 BIT(2) + + /** + * @highest_bank_bit: Highest Bank Bit + * + * The Highest Bank Bit value represents the bit of the highest + * DDR bank. This should ideally use DRAM type detection. + */ + int highest_bank_bit; + bool ubwc_bank_spread; + + /** + * @macrotile_mode: Macrotile Mode + * + * Whether to use 4-channel macrotiling mode or the newer + * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is + * 4-channel and 1 is 8-channel. + */ + bool macrotile_mode; +}; + +#define UBWC_1_0 0x10000000 +#define UBWC_2_0 0x20000000 +#define UBWC_3_0 0x30000000 +#define UBWC_4_0 0x40000000 +#define UBWC_4_3 0x40030000 +#define UBWC_5_0 0x50000000 + +#if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG) +const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void); +#else +static inline const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void) +{ + return ERR_PTR(-EOPNOTSUPP); +} +#endif + +static inline bool qcom_ubwc_get_ubwc_mode(const struct qcom_ubwc_cfg_data *cfg) +{ + bool ret = cfg->ubwc_enc_version == UBWC_1_0; + + if (ret && !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL1)) + pr_err("UBWC config discrepancy - level 1 swizzling disabled on UBWC 1.0\n"); + + return ret; +} + +#endif /* __QCOM_UBWC_H__ */ diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h index 51cab2def9ec..876130091384 100644 --- a/include/linux/sprintf.h +++ b/include/linux/sprintf.h @@ -4,6 +4,7 @@ #include <linux/compiler_attributes.h> #include <linux/types.h> +#include <linux/stdarg.h> int num_to_str(char *buf, int size, unsigned long long num, unsigned int width); diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 900b0d5c05f5..f179700fecaf 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -46,11 +46,11 @@ int init_srcu_struct(struct srcu_struct *ssp); /* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */ #define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). #define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). -#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite(). +// 0x4 // SRCU-lite is no longer with us. #define SRCU_READ_FLAVOR_FAST 0x8 // srcu_read_lock_fast(). #define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \ - SRCU_READ_FLAVOR_LITE | SRCU_READ_FLAVOR_FAST) // All of the above. -#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_LITE | SRCU_READ_FLAVOR_FAST) + SRCU_READ_FLAVOR_FAST) // All of the above. +#define SRCU_READ_FLAVOR_SLOWGP SRCU_READ_FLAVOR_FAST // Flavors requiring synchronize_rcu() // instead of smp_mb(). void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); @@ -300,33 +300,6 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct * } /** - * srcu_read_lock_lite - register a new reader for an SRCU-protected structure. - * @ssp: srcu_struct in which to register the new reader. - * - * Enter an SRCU read-side critical section, but for a light-weight - * smp_mb()-free reader. See srcu_read_lock() for more information. - * - * If srcu_read_lock_lite() is ever used on an srcu_struct structure, - * then none of the other flavors may be used, whether before, during, - * or after. 
Note that grace-period auto-expediting is disabled for _lite - * srcu_struct structures because auto-expedited grace periods invoke - * synchronize_rcu_expedited(), IPIs and all. - * - * Note that srcu_read_lock_lite() can be invoked only from those contexts - * where RCU is watching, that is, from contexts where it would be legal - * to invoke rcu_read_lock(). Otherwise, lockdep will complain. - */ -static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp) -{ - int retval; - - srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_LITE); - retval = __srcu_read_lock_lite(ssp); - rcu_try_lock_acquire(&ssp->dep_map); - return retval; -} - -/** * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * @@ -435,22 +408,6 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __ } /** - * srcu_read_unlock_lite - unregister a old reader from an SRCU-protected structure. - * @ssp: srcu_struct in which to unregister the old reader. - * @idx: return value from corresponding srcu_read_lock_lite(). - * - * Exit a light-weight SRCU read-side critical section. - */ -static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) - __releases(ssp) -{ - WARN_ON_ONCE(idx & ~0x1); - srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); - srcu_lock_release(&ssp->dep_map); - __srcu_read_unlock_lite(ssp, idx); -} - -/** * srcu_read_unlock_nmisafe - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock_nmisafe(). @@ -524,4 +481,9 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct, srcu_read_unlock(_T->lock, _T->idx), int idx) +DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct, + _T->scp = srcu_read_lock_fast(_T->lock), + srcu_read_unlock_fast(_T->lock, _T->scp), + struct srcu_ctr __percpu *scp) + #endif diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 380260317d98..51ce25f07930 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -93,9 +93,6 @@ static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp)); } -#define __srcu_read_lock_lite __srcu_read_lock -#define __srcu_read_unlock_lite __srcu_read_unlock - static inline void synchronize_srcu_expedited(struct srcu_struct *ssp) { synchronize_srcu(ssp); diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 8bed7e6cc4c1..bf44d8d1e69e 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -278,44 +278,6 @@ static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast()."); } -/* - * Counts the new reader in the appropriate per-CPU element of the - * srcu_struct. Returns an index that must be passed to the matching - * srcu_read_unlock_lite(). - * - * Note that this_cpu_inc() is an RCU read-side critical section either - * because it disables interrupts, because it is a single instruction, - * or because it is a read-modify-write atomic operation, depending on - * the whims of the architecture. 
- */ -static inline int __srcu_read_lock_lite(struct srcu_struct *ssp) -{ - struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp); - - RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite()."); - this_cpu_inc(scp->srcu_locks.counter); /* Y */ - barrier(); /* Avoid leaking the critical section. */ - return __srcu_ptr_to_ctr(ssp, scp); -} - -/* - * Removes the count for the old reader from the appropriate - * per-CPU element of the srcu_struct. Note that this may well be a - * different CPU than that which was incremented by the corresponding - * srcu_read_lock_lite(), but it must be within the same task. - * - * Note that this_cpu_inc() is an RCU read-side critical section either - * because it disables interrupts, because it is a single instruction, - * or because it is a read-modify-write atomic operation, depending on - * the whims of the architecture. - */ -static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) -{ - barrier(); /* Avoid leaking the critical section. */ - this_cpu_inc(__srcu_ctr_to_ptr(ssp, idx)->srcu_unlocks.counter); /* Z */ - RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite()."); -} - void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor); // Record reader usage even for CONFIG_PROVE_RCU=n kernels. This is diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 929d67710cc5..dab49e2ec8c0 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -93,4 +93,24 @@ enum { #define DECLARE_FLEX_ARRAY(TYPE, NAME) \ __DECLARE_FLEX_ARRAY(TYPE, NAME) +/** + * TRAILING_OVERLAP() - Overlap a flexible-array member with trailing members. + * + * Creates a union between a flexible-array member (FAM) in a struct and a set + * of additional members that would otherwise follow it. + * + * @TYPE: Flexible structure type name, including "struct" keyword. + * @NAME: Name for a variable to define. + * @FAM: The flexible-array member within @TYPE + * @MEMBERS: Trailing overlapping members. + */ +#define TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS) \ + union { \ + TYPE NAME; \ + struct { \ + unsigned char __offset_to_##FAM[offsetof(TYPE, FAM)]; \ + MEMBERS \ + }; \ + } + #endif diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 3132262a404d..72820503514c 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -88,55 +88,73 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task #endif /* CONFIG_SMP */ /* - * stop_machine "Bogolock": stop the entire machine, disable - * interrupts. This is a very heavy lock, which is equivalent to - * grabbing every spinlock (and more). So the "read" side to such a - * lock is anything which disables preemption. + * stop_machine "Bogolock": stop the entire machine, disable interrupts. + * This is a very heavy lock, which is equivalent to grabbing every raw + * spinlock (and more). So the "read" side to such a lock is anything + * which disables preemption. */ #if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) /** * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run - * @data: the data ptr for the @fn() - * @cpus: the cpus to run the @fn() on (NULL = any online cpu) + * @data: the data ptr to pass to @fn() + * @cpus: the cpus to run @fn() on (NULL = run on each online CPU) * - * Description: This causes a thread to be scheduled on every cpu, - * each of which disables interrupts. 
The result is that no one is - * holding a spinlock or inside any other preempt-disabled region when - * @fn() runs. + * Description: This causes a thread to be scheduled on every CPU, each of which + * will run with interrupts disabled. Each CPU specified by @cpus will + * run @fn. While @fn is executing, there will be no other CPUs holding + * a raw spinlock or running within any other type of preempt-disabled + * region of code. * - * This can be thought of as a very heavy write lock, equivalent to - * grabbing every spinlock in the kernel. + * When @cpus specifies only a single CPU, this can be thought of as + * a reader-writer lock where readers disable preemption (for example, + * by holding a raw spinlock) and where the insanely heavy writers run + * @fn while also preventing any other CPU from doing any useful work. + * These writers can also be thought of as having implicitly grabbed every + * raw spinlock in the kernel. * - * Protects against CPU hotplug. + * When @fn is a no-op, this can be thought of as an RCU implementation + * where readers again disable preemption and writers use stop_machine() + * in place of synchronize_rcu(), albeit with orders of magnitude more + * disruption than even that of synchronize_rcu_expedited(). + * + * Although only one stop_machine() operation can proceed at a time, + * the possibility of blocking in cpus_read_lock() means that the caller + * cannot usefully rely on this serialization. + * + * Return: 0 if all invocations of @fn return zero. Otherwise, the + * value returned by an arbitrarily chosen member of the set of calls to + * @fn that returned non-zero. */ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); /** * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function * @fn: the function to run - * @data: the data ptr for the @fn() - * @cpus: the cpus to run the @fn() on (NULL = any online cpu) + * @data: the data ptr to pass to @fn() + * @cpus: the cpus to run @fn() on (NULL = run on each online CPU) + * + * Same as above. Avoids nested calls to cpus_read_lock(). * - * Same as above. Must be called from with in a cpus_read_lock() protected - * region. Avoids nested calls to cpus_read_lock(). + * Context: Must be called from within a cpus_read_lock() protected region. */ int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); /** * stop_core_cpuslocked: - stop all threads on just one core * @cpu: any cpu in the targeted core - * @fn: the function to run - * @data: the data ptr for @fn() + * @fn: the function to run on each CPU in the core containing @cpu + * @data: the data ptr to pass to @fn() * - * Same as above, but instead of every CPU, only the logical CPUs of a - * single core are affected. + * Same as above, but instead of every CPU, only the logical CPUs of the + * single core containing @cpu are affected. * * Context: Must be called from within a cpus_read_lock() protected region. * - * Return: 0 if all executions of @fn returned 0, any non zero return - * value if any returned non zero. + * Return: 0 if all invocations of @fn return zero. Otherwise, the + * value returned by an arbitrarily chosen member of the set of calls to + * @fn that returned non-zero.
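+ *
+ * A hedged usage sketch (the callback and the calling context are
+ * hypothetical, not part of this API):
+ *
+ *	static int example_quiesce_core(void *unused)
+ *	{
+ *		return 0;	// runs on each CPU of the core, IRQs off
+ *	}
+ *
+ *	cpus_read_lock();
+ *	ret = stop_core_cpuslocked(cpu, example_quiesce_core, NULL);
+ *	cpus_read_unlock();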
*/ int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data); diff --git a/include/linux/string.h b/include/linux/string.h index 01621ad0f598..fdd3442c6bcb 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -345,16 +345,6 @@ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, int ptr_to_hashval(const void *ptr, unsigned long *hashval_out); -/** - * strstarts - does @str start with @prefix? - * @str: string to examine - * @prefix: prefix to look for. - */ -static inline bool strstarts(const char *str, const char *prefix) -{ - return strncmp(str, prefix, strlen(prefix)) == 0; -} - size_t memweight(const void *ptr, size_t bytes); /** @@ -562,4 +552,14 @@ static __always_inline size_t str_has_prefix(const char *str, const char *prefix return strncmp(str, prefix, len) == 0 ? len : 0; } +/** + * strstarts - does @str start with @prefix? + * @str: string to examine + * @prefix: prefix to look for. + */ +static inline bool strstarts(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} + #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index c4b0eb2b2f04..ada17b57ca44 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -69,15 +69,17 @@ enum rpc_reject_stat { }; enum rpc_auth_stat { - RPC_AUTH_OK = 0, - RPC_AUTH_BADCRED = 1, - RPC_AUTH_REJECTEDCRED = 2, - RPC_AUTH_BADVERF = 3, - RPC_AUTH_REJECTEDVERF = 4, - RPC_AUTH_TOOWEAK = 5, + RPC_AUTH_OK = 0, /* success */ + RPC_AUTH_BADCRED = 1, /* bad credential (seal broken) */ + RPC_AUTH_REJECTEDCRED = 2, /* client must begin new session */ + RPC_AUTH_BADVERF = 3, /* bad verifier (seal broken) */ + RPC_AUTH_REJECTEDVERF = 4, /* verifier expired or replayed */ + RPC_AUTH_TOOWEAK = 5, /* rejected for security reasons */ + RPC_AUTH_INVALIDRESP = 6, /* bogus response verifier */ + RPC_AUTH_FAILED = 7, /* reason unknown */ /* RPCSEC_GSS errors */ - RPCSEC_GSS_CREDPROBLEM = 13, - RPCSEC_GSS_CTXPROBLEM = 14 + RPCSEC_GSS_CREDPROBLEM = 13, /* no credentials for user */ + RPCSEC_GSS_CTXPROBLEM = 14 /* problem with context */ }; #define RPC_MAXNETNAMELEN 256 diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 3b35b6f6533a..2cb406f8ff4e 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -98,7 +98,7 @@ static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) { } struct rpc_clnt; -extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); +extern int rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); extern int rpc_remove_client_dir(struct rpc_clnt *); extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh); @@ -127,9 +127,9 @@ extern void rpc_remove_cache_dir(struct dentry *); struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); void rpc_destroy_pipe_data(struct rpc_pipe *pipe); -extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, +extern int rpc_mkpipe_dentry(struct dentry *, const char *, void *, struct rpc_pipe *); -extern int rpc_unlink(struct dentry *); +extern void rpc_unlink(struct rpc_pipe *); extern int register_rpc_pipefs(void); extern void unregister_rpc_pipefs(void); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 48666b83fe68..40cbe81360ed 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -245,10 +245,10 @@ 
struct svc_rqst { * initialisation success. */ - unsigned long bc_to_initval; - unsigned int bc_to_retries; - void ** rq_lease_breaker; /* The v4 client breaking a lease */ + unsigned long bc_to_initval; + unsigned int bc_to_retries; unsigned int rq_status_counter; /* RPC processing counter */ + void **rq_lease_breaker; /* The v4 client breaking a lease */ }; /* bits for rq_flags */ diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 2e111153f7cd..4b92fec23a49 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -86,7 +86,6 @@ struct auth_domain { enum svc_auth_status { SVC_GARBAGE = 1, - SVC_SYSERR, SVC_VALID, SVC_NEGATIVE, SVC_OK, diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index a2ab813a9800..e3358c630ba1 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -119,6 +119,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) #define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) +#define rpc_autherr_invalidresp cpu_to_be32(RPC_AUTH_INVALIDRESP) +#define rpc_autherr_failed cpu_to_be32(RPC_AUTH_FAILED) #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) @@ -242,8 +244,7 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst); -extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, - struct page **pages, struct rpc_rqst *rqst); +void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf); extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); extern int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes); extern void __xdr_commit_encode(struct xdr_stream *xdr); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 6a3f92098872..317ae31e89b3 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -478,6 +478,7 @@ extern unsigned int lock_system_sleep(void); extern void unlock_system_sleep(unsigned int); extern bool pm_sleep_transition_in_progress(void); +bool pm_hibernate_is_recovering(void); #else /* !CONFIG_PM_SLEEP */ @@ -511,6 +512,7 @@ static inline unsigned int lock_system_sleep(void) { return 0; } static inline void unlock_system_sleep(unsigned int flags) {} static inline bool pm_sleep_transition_in_progress(void) { return false; } +static inline bool pm_hibernate_is_recovering(void) { return false; } #endif /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e5603cc91963..77f45e5d4413 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -78,6 +78,7 @@ struct cachestat; struct statmount; struct mnt_id_req; struct xattr_args; +struct file_attr; #include <linux/types.h> #include <linux/aio_abi.h> @@ -371,6 +372,12 @@ asmlinkage long sys_removexattrat(int dfd, const char __user *path, asmlinkage long sys_lremovexattr(const char __user *path, const char __user *name); asmlinkage long sys_fremovexattr(int fd, const char __user *name); +asmlinkage long sys_file_getattr(int dfd, const char __user *filename, + struct file_attr __user *attr, size_t usize, + unsigned int at_flags); +asmlinkage long sys_file_setattr(int dfd, const char __user *filename, + struct file_attr __user *attr, size_t 
usize, + unsigned int at_flags); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_epoll_create1(int flags); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 40a6ac6c9713..92e9146b1104 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -242,9 +242,7 @@ int do_proc_douintvec(const struct ctl_table *table, int write, int write, void *data), void *data); -extern int pwrsw_enabled; extern int unaligned_enabled; -extern int unaligned_dump_stack; extern int no_unaligned_warning; #else /* CONFIG_SYSCTL */ @@ -285,7 +283,4 @@ static inline bool sysctl_is_alias(char *param) } #endif /* CONFIG_SYSCTL */ -int sysctl_max_threads(const struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); - #endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h index 07cbab516942..b449665c686a 100644 --- a/include/linux/sysfb.h +++ b/include/linux/sysfb.h @@ -7,9 +7,13 @@ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> */ -#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/types.h> + #include <linux/platform_data/simplefb.h> +struct device; +struct platform_device; struct screen_info; enum { diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 29f59d50dc73..57e478bfaef2 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -208,7 +208,6 @@ struct tcp_sock { u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ u16 gso_segs; /* Max number of segs per GSO packet */ /* from STCP, retrans queue hinting */ - struct sk_buff *lost_skb_hint; struct sk_buff *retransmit_skb_hint; __cacheline_group_end(tcp_sock_read_tx); @@ -419,8 +418,6 @@ struct tcp_sock { struct tcp_sack_block recv_sack_cache[4]; - int lost_cnt_hint; - u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -624,6 +621,7 @@ void tcp_sock_set_nodelay(struct sock *sk); void tcp_sock_set_quickack(struct sock *sk, int val); int tcp_sock_set_syncnt(struct sock *sk, int val); int tcp_sock_set_user_timeout(struct sock *sk, int val); +int tcp_sock_set_maxseg(struct sock *sk, int val); static inline bool dst_tcp_usec_ts(const struct dst_entry *dst) { diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 0982d1d52b24..dce03a5cafb7 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -28,7 +28,7 @@ * @shift: cycle to nanosecond divisor (power of two) */ struct cyclecounter { - u64 (*read)(const struct cyclecounter *cc); + u64 (*read)(struct cyclecounter *cc); u64 mask; u32 mult; u32 shift; @@ -53,7 +53,7 @@ struct cyclecounter { * @frac: accumulated fractional nanoseconds */ struct timecounter { - const struct cyclecounter *cc; + struct cyclecounter *cc; u64 cycle_last; u64 nsec; u64 mask; @@ -100,7 +100,7 @@ static inline void timecounter_adjtime(struct timecounter *tc, s64 delta) * the time stamp counter by the number of elapsed nanoseconds. 
*/ extern void timecounter_init(struct timecounter *tc, - const struct cyclecounter *cc, + struct cyclecounter *cc, u64 start_tstamp); /** diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 785048a3b3e6..c27aac67cb3f 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -12,6 +12,22 @@ #include <linux/time.h> /** + * timekeeper_ids - IDs for various time keepers in the kernel + * @TIMEKEEPER_CORE: The central core timekeeper managing system time + * @TIMEKEEPER_AUX_FIRST: The first AUX timekeeper + * @TIMEKEEPER_AUX_LAST: The last AUX timekeeper + * @TIMEKEEPERS_MAX: The maximum number of timekeepers managed + */ +enum timekeeper_ids { + TIMEKEEPER_CORE, +#ifdef CONFIG_POSIX_AUX_CLOCKS + TIMEKEEPER_AUX_FIRST, + TIMEKEEPER_AUX_LAST = TIMEKEEPER_AUX_FIRST + MAX_AUX_CLOCKS - 1, +#endif + TIMEKEEPERS_MAX, +}; + +/** * struct tk_read_base - base structure for timekeeping readout * @clock: Current clocksource used for timekeeping. * @mask: Bitmask for two's complement subtraction of non 64bit clocks @@ -51,11 +67,14 @@ struct tk_read_base { * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime * @offs_tai: Offset clock monotonic -> clock tai + * @offs_aux: Offset clock monotonic -> clock AUX * @coarse_nsec: The nanoseconds part for coarse time getters + * @id: The timekeeper ID * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds * @clock_was_set_seq: The sequence number of clock was set events * @cs_was_changed_seq: The sequence number of clocksource change events + * @clock_valid: Indicator for valid clock * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP @@ -95,13 +114,16 @@ struct tk_read_base { * @monotonic_to_boottime is a timespec64 representation of @offs_boot to * accelerate the VDSO update for CLOCK_BOOTTIME. * + * @offs_aux is used by the auxiliary timekeepers which do not utilize any + * of the regular timekeeper offset fields. + * * The cacheline ordering of the structure is optimized for in kernel usage of * the ktime_get() and ktime_get_ts64() family of time accessors. Struct * timekeeper is prepended in the core timekeeping code with a sequence count, * which results in the following cacheline layout: * * 0: seqcount, tkr_mono - * 1: xtime_sec ... coarse_nsec + * 1: xtime_sec ... 
id * 2: tkr_raw, raw_sec * 3,4: Internal variables * @@ -121,8 +143,12 @@ struct timekeeper { struct timespec64 wall_to_monotonic; ktime_t offs_real; ktime_t offs_boot; - ktime_t offs_tai; + union { + ktime_t offs_tai; + ktime_t offs_aux; + }; u32 coarse_nsec; + enum timekeeper_ids id; /* Cacheline 2: */ struct tk_read_base tkr_raw; @@ -131,6 +157,7 @@ struct timekeeper { /* Cachline 3 and 4 (timekeeping internal variables): */ unsigned int clock_was_set_seq; u8 cs_was_changed_seq; + u8 clock_valid; struct timespec64 monotonic_to_boot; @@ -163,4 +190,10 @@ static inline void update_vsyscall_tz(void) } #endif +#if defined(CONFIG_GENERIC_GETTIMEOFDAY) && defined(CONFIG_POSIX_AUX_CLOCKS) +extern void vdso_time_update_aux(struct timekeeper *tk); +#else +static inline void vdso_time_update_aux(struct timekeeper *tk) { } +#endif + #endif /* _LINUX_TIMEKEEPER_INTERNAL_H */ diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 542773650200..aee2c1a46e47 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -44,6 +44,7 @@ extern void ktime_get_ts64(struct timespec64 *ts); extern void ktime_get_real_ts64(struct timespec64 *tv); extern void ktime_get_coarse_ts64(struct timespec64 *ts); extern void ktime_get_coarse_real_ts64(struct timespec64 *ts); +extern void ktime_get_clock_ts64(clockid_t id, struct timespec64 *ts); /* Multigrain timestamp interfaces */ extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts); @@ -263,6 +264,17 @@ extern bool timekeeping_rtc_skipresume(void); extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); +/* + * Auxiliary clock interfaces + */ +#ifdef CONFIG_POSIX_AUX_CLOCKS +extern bool ktime_get_aux(clockid_t id, ktime_t *kt); +extern bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *kt); +#else +static inline bool ktime_get_aux(clockid_t id, ktime_t *kt) { return false; } +static inline bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *kt) { return false; } +#endif + /** * struct system_time_snapshot - simultaneous raw/real time capture with * counter value diff --git a/include/linux/tnum.h b/include/linux/tnum.h index 3c13240077b8..57ed3035cc30 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -40,6 +40,8 @@ struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness); struct tnum tnum_add(struct tnum a, struct tnum b); /* Subtract two tnums, return @a - @b */ struct tnum tnum_sub(struct tnum a, struct tnum b); +/* Neg of a tnum, return 0 - @a */ +struct tnum tnum_neg(struct tnum a); /* Bitwise-AND, return @a & @b */ struct tnum tnum_and(struct tnum a, struct tnum b); /* Bitwise-OR, return @a | @b */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index a3d8305e88a5..b0e9eb5ef022 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -87,7 +87,8 @@ struct tpm_class_ops { const u8 req_complete_val; bool (*req_canceled)(struct tpm_chip *chip, u8 status); int (*recv) (struct tpm_chip *chip, u8 *buf, size_t len); - int (*send) (struct tpm_chip *chip, u8 *buf, size_t len); + int (*send)(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t cmd_len); void (*cancel) (struct tpm_chip *chip); u8 (*status) (struct tpm_chip *chip); void (*update_timeouts)(struct tpm_chip *chip, @@ -182,7 +183,7 @@ struct tpm_chip { unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */ bool duration_adjusted; - struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES]; + struct dentry *bios_dir; const struct attribute_group *groups[3 + TPM_MAX_HASHES]; unsigned int 
groups_cnt; @@ -350,6 +351,7 @@ enum tpm_chip_flags { TPM_CHIP_FLAG_SUSPENDED = BIT(8), TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9), TPM_CHIP_FLAG_DISABLE = BIT(10), + TPM_CHIP_FLAG_SYNC = BIT(11), }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 08f89a598366..332ddb93603e 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -232,7 +232,7 @@ bool tty_port_carrier_raised(struct tty_port *port); void tty_port_raise_dtr_rts(struct tty_port *port); void tty_port_lower_dtr_rts(struct tty_port *port); void tty_port_hangup(struct tty_port *port); -void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); +void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async); void tty_port_tty_wakeup(struct tty_port *port); int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, struct file *filp); @@ -251,4 +251,23 @@ static inline int tty_port_users(struct tty_port *port) return port->count + port->blocked_open; } +/** + * tty_port_tty_hangup - helper to hang up a tty asynchronously + * @port: tty port + * @check_clocal: hang only ttys with %CLOCAL unset? + */ +static inline void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) +{ + __tty_port_tty_hangup(port, check_clocal, true); +} + +/** + * tty_port_tty_vhangup - helper to hang up a tty synchronously + * @port: tty port + */ +static inline void tty_port_tty_vhangup(struct tty_port *port) +{ + __tty_port_tty_hangup(port, false, false); +} + #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 7c06f4795670..1beb5b395d81 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -296,6 +296,8 @@ static inline bool pagefault_disabled(void) */ #define faulthandler_disabled() (pagefault_disabled() || in_atomic()) +DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable()) + #ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS /** diff --git a/include/linux/usb.h b/include/linux/usb.h index 92c752f5446f..9d662c6abb4d 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1455,6 +1455,10 @@ typedef void (*usb_complete_t)(struct urb *); * @sg: scatter gather buffer list, the buffer size of each element in * the list (except the last) must be divisible by the endpoint's * max packet size if no_sg_constraint isn't set in 'struct usb_bus' + * @sgt: used to hold a scatter gather table returned by usb_alloc_noncoherent(), + * which describes the allocated non-coherent and possibly non-contiguous + * memory and is guaranteed to have 1 single DMA mapped segment. The + * allocated memory needs to be freed by usb_free_noncoherent(). * @num_mapped_sgs: (internal) number of mapped sg entries * @num_sgs: number of entries in the sg list * @transfer_buffer_length: How big is transfer_buffer. 
The transfer may @@ -1621,6 +1625,7 @@ struct urb { void *transfer_buffer; /* (in) associated data buffer */ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ struct scatterlist *sg; /* (in) scatter gather buffer list */ + struct sg_table *sgt; /* (in) scatter gather table for noncoherent buffer */ int num_mapped_sgs; /* (internal) mapped sg entries */ int num_sgs; /* (in) number of entries in the sg list */ u32 transfer_buffer_length; /* (in) data buffer length */ @@ -1782,7 +1787,6 @@ extern void usb_block_urb(struct urb *urb); extern void usb_kill_anchored_urbs(struct usb_anchor *anchor); extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); -extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor); extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor); extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor); extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); @@ -1827,6 +1831,16 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size, void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma); +enum dma_data_direction; + +void *usb_alloc_noncoherent(struct usb_device *dev, size_t size, + gfp_t mem_flags, dma_addr_t *dma, + enum dma_data_direction dir, + struct sg_table **table); +void usb_free_noncoherent(struct usb_device *dev, size_t size, + void *addr, enum dma_data_direction dir, + struct sg_table *table); + /*-------------------------------------------------------------------* * SYNCHRONOUS CALL SUPPORT * *-------------------------------------------------------------------*/ diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 2d207cb4837d..4ac082a63173 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -119,6 +119,7 @@ struct cdc_ncm_ctx { u32 timer_interval; u32 max_ndp_size; u8 is_ndp16; + u8 filtering_supported; union { struct usb_cdc_ncm_ndp16 *delayed_ndp16; struct usb_cdc_ncm_ndp32 *delayed_ndp32; diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index ebdfef124b2b..e17ebeee24e3 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -72,6 +72,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2 #define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3 #define CI_HDRC_CONTROLLER_VBUS_EVENT 4 +#define CI_HDRC_CONTROLLER_PULLUP_EVENT 5 int (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index d8c4e9f73839..c18041fafa52 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -339,9 +339,6 @@ int usb_add_config(struct usb_composite_dev *, struct usb_configuration *, int (*)(struct usb_configuration *)); -void usb_remove_config(struct usb_composite_dev *, - struct usb_configuration *); - /* predefined index for usb_composite_driver */ enum { USB_GADGET_MANUFACTURER_IDX = 0, diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index df33333650a0..0f28c5512fcb 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -860,10 +860,6 @@ container_of(str_item, struct gadget_string, item) int usb_descriptor_fillbuf(void *, unsigned, const struct usb_descriptor_header **); -/* build config descriptor from single descriptor vector */ -int usb_gadget_config_buf(const struct usb_config_descriptor *config, - void 
*buf, unsigned buflen, const struct usb_descriptor_header **desc); - /* copy a NULL-terminated vector of descriptors */ struct usb_descriptor_header **usb_copy_descriptors( struct usb_descriptor_header **); diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h index e6c14f2b1f9b..40afcee8b4f5 100644 --- a/include/linux/usb/tegra_usb_phy.h +++ b/include/linux/usb/tegra_usb_phy.h @@ -80,13 +80,4 @@ struct tegra_usb_phy { bool powered_on; }; -void tegra_usb_phy_preresume(struct usb_phy *phy); - -void tegra_usb_phy_postresume(struct usb_phy *phy); - -void tegra_ehci_phy_restore_start(struct usb_phy *phy, - enum tegra_usb_phy_port_speed port_speed); - -void tegra_ehci_phy_restore_end(struct usb_phy *phy); - #endif /* __TEGRA_USB_PHY_H */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 0b9f1e598e3a..a2d54122823d 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -58,7 +58,7 @@ struct usbnet { unsigned interrupt_count; struct mutex interrupt_mutex; struct usb_anchor deferred; - struct tasklet_struct bh; + struct work_struct bh_work; struct work_struct kevent; unsigned long flags; @@ -76,6 +76,7 @@ struct usbnet { # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 +# define EVENT_LINK_CARRIER_ON 14 /* This one is special, as it indicates that the device is going away * there are cyclic dependencies between tasklet, timer and bh * that must be broken diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h deleted file mode 100644 index ad970416260d..000000000000 --- a/include/linux/usermode_driver.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __LINUX_USERMODE_DRIVER_H__ -#define __LINUX_USERMODE_DRIVER_H__ - -#include <linux/umh.h> -#include <linux/path.h> - -struct umd_info { - const char *driver_name; - struct file *pipe_to_umh; - struct file *pipe_from_umh; - struct path wd; - struct pid *tgid; -}; -int umd_load_blob(struct umd_info *info, const void *data, size_t len); -int umd_unload_blob(struct umd_info *info); -int fork_usermode_driver(struct umd_info *info); -void umd_cleanup_helper(struct umd_info *info); - -#endif /* __LINUX_USERMODE_DRIVER_H__ */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 64cb4b04be7a..04b90c88d164 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -11,6 +11,7 @@ #include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/completion.h> +#include <linux/virtio_features.h> /** * struct virtqueue - a queue to register buffers for sending or receiving. @@ -141,7 +142,9 @@ struct virtio_admin_cmd { * @config: the configuration ops for this device. * @vringh_config: configuration ops for host vrings. * @vqs: the list of virtqueues for this device. - * @features: the features supported by both driver and device. + * @features: the 64 lower features supported by both driver and device. + * @features_array: the full features space supported by both driver and + * device. * @priv: private pointer for the driver's use. * @debugfs_dir: debugfs directory entry. * @debugfs_filter_features: features to be filtered set by debugfs. 
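The 64-bit @features word and the wider @features_array documented above alias the same storage (the VIRTIO_DECLARE_FEATURES union introduced further below), with bit b of the feature space living in u64 word b >> 6. A minimal standalone sketch of that dword/bit addressing, using userspace stand-ins for the kernel helpers (the names and the test driver here are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace stand-ins mirroring the virtio_features bit helpers. */
#define FEATURES_DWORDS  2
#define FEATURES_MAX     (FEATURES_DWORDS * 64)
#define FEATURE_BIT(b)   (1ULL << ((b) & 0x3f))
#define FEATURE_DWORD(b) ((b) >> 6)

static int feature_test_bit(const uint64_t *features, unsigned int bit)
{
	return bit < FEATURES_MAX &&
	       !!(features[FEATURE_DWORD(bit)] & FEATURE_BIT(bit));
}

static void feature_set_bit(uint64_t *features, unsigned int bit)
{
	if (bit < FEATURES_MAX)
		features[FEATURE_DWORD(bit)] |= FEATURE_BIT(bit);
}

int main(void)
{
	uint64_t features[FEATURES_DWORDS];

	memset(features, 0, sizeof(features));
	feature_set_bit(features, 40);	/* word 0, bit 40 */
	feature_set_bit(features, 100);	/* word 1, bit 36 */

	printf("bit 40: %d, bit 100: %d\n",
	       feature_test_bit(features, 40),
	       feature_test_bit(features, 100));
	printf("words: %#llx %#llx\n",
	       (unsigned long long)features[0],
	       (unsigned long long)features[1]);
	return 0;
}

Because the union keeps word 0 aliased to the legacy 64-bit field, existing readers of vdev->features continue to see the first 64 feature bits unchanged.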
@@ -159,11 +162,11 @@ struct virtio_device { const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; - u64 features; + VIRTIO_DECLARE_FEATURES(features); void *priv; #ifdef CONFIG_VIRTIO_DEBUG struct dentry *debugfs_dir; - u64 debugfs_filter_features; + u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS]; #endif }; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index b3e1d30c765b..918cf25cd3c6 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -77,7 +77,11 @@ struct virtqueue_info { * vdev: the virtio_device * @get_features: get the array of feature bits for this device. * vdev: the virtio_device - * Returns the first 64 feature bits (all we currently need). + * Returns the first 64 feature bits. + * @get_extended_features: + * vdev: the virtio_device + * Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently + * need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device * This sends the driver feature bits to the device: it can change @@ -121,6 +125,8 @@ struct virtio_config_ops { void (*del_vqs)(struct virtio_device *); void (*synchronize_cbs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); + void (*get_extended_features)(struct virtio_device *vdev, + u64 *features); int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, @@ -147,13 +153,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, static inline bool __virtio_test_bit(const struct virtio_device *vdev, unsigned int fbit) { - /* Did you forget to fix assumptions on max features? */ - if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 64); - else - BUG_ON(fbit >= 64); - - return vdev->features & BIT_ULL(fbit); + return virtio_features_test_bit(vdev->features_array, fbit); } /** @@ -164,13 +164,7 @@ static inline bool __virtio_test_bit(const struct virtio_device *vdev, static inline void __virtio_set_bit(struct virtio_device *vdev, unsigned int fbit) { - /* Did you forget to fix assumptions on max features? */ - if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 64); - else - BUG_ON(fbit >= 64); - - vdev->features |= BIT_ULL(fbit); + virtio_features_set_bit(vdev->features_array, fbit); } /** @@ -181,13 +175,7 @@ static inline void __virtio_set_bit(struct virtio_device *vdev, static inline void __virtio_clear_bit(struct virtio_device *vdev, unsigned int fbit) { - /* Did you forget to fix assumptions on max features? 
*/ - if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 64); - else - BUG_ON(fbit >= 64); - - vdev->features &= ~BIT_ULL(fbit); + virtio_features_clear_bit(vdev->features_array, fbit); } /** @@ -204,6 +192,17 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, return __virtio_test_bit(vdev, fbit); } +static inline void virtio_get_features(struct virtio_device *vdev, + u64 *features) +{ + if (vdev->config->get_extended_features) { + vdev->config->get_extended_features(vdev, features); + return; + } + + virtio_features_from_u64(features, vdev->config->get_features(vdev)); +} + /** * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device diff --git a/include/linux/virtio_features.h b/include/linux/virtio_features.h new file mode 100644 index 000000000000..f748f2f87de8 --- /dev/null +++ b/include/linux/virtio_features.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_FEATURES_H +#define _LINUX_VIRTIO_FEATURES_H + +#include <linux/bits.h> + +#define VIRTIO_FEATURES_DWORDS 2 +#define VIRTIO_FEATURES_MAX (VIRTIO_FEATURES_DWORDS * 64) +#define VIRTIO_FEATURES_WORDS (VIRTIO_FEATURES_DWORDS * 2) +#define VIRTIO_BIT(b) BIT_ULL((b) & 0x3f) +#define VIRTIO_DWORD(b) ((b) >> 6) +#define VIRTIO_DECLARE_FEATURES(name) \ + union { \ + u64 name; \ + u64 name##_array[VIRTIO_FEATURES_DWORDS];\ + } + +static inline bool virtio_features_chk_bit(unsigned int bit) +{ + if (__builtin_constant_p(bit)) { + /* + * Don't care returning the correct value: the build + * will fail before any bad features access + */ + BUILD_BUG_ON(bit >= VIRTIO_FEATURES_MAX); + } else { + if (WARN_ON_ONCE(bit >= VIRTIO_FEATURES_MAX)) + return false; + } + return true; +} + +static inline bool virtio_features_test_bit(const u64 *features, + unsigned int bit) +{ + return virtio_features_chk_bit(bit) && + !!(features[VIRTIO_DWORD(bit)] & VIRTIO_BIT(bit)); +} + +static inline void virtio_features_set_bit(u64 *features, + unsigned int bit) +{ + if (virtio_features_chk_bit(bit)) + features[VIRTIO_DWORD(bit)] |= VIRTIO_BIT(bit); +} + +static inline void virtio_features_clear_bit(u64 *features, + unsigned int bit) +{ + if (virtio_features_chk_bit(bit)) + features[VIRTIO_DWORD(bit)] &= ~VIRTIO_BIT(bit); +} + +static inline void virtio_features_zero(u64 *features) +{ + memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_DWORDS); +} + +static inline void virtio_features_from_u64(u64 *features, u64 from) +{ + virtio_features_zero(features); + features[0] = from; +} + +static inline bool virtio_features_equal(const u64 *f1, const u64 *f2) +{ + int i; + + for (i = 0; i < VIRTIO_FEATURES_DWORDS; ++i) + if (f1[i] != f2[i]) + return false; + return true; +} + +static inline void virtio_features_copy(u64 *to, const u64 *from) +{ + memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_DWORDS); +} + +static inline void virtio_features_andnot(u64 *to, const u64 *f1, const u64 *f2) +{ + int i; + + for (i = 0; i < VIRTIO_FEATURES_DWORDS; i++) + to[i] = f1[i] & ~f2[i]; +} + +#endif diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 02a9f4dc594d..20e0584db1dd 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -47,9 +47,9 @@ static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, return 0; } -static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, - const struct virtio_net_hdr *hdr, - bool little_endian) +static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb, + const struct virtio_net_hdr *hdr, + bool 
little_endian, u8 hdr_gso_type) { unsigned int nh_min_len = sizeof(struct iphdr); unsigned int gso_type = 0; @@ -57,8 +57,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, unsigned int p_off = 0; unsigned int ip_proto; - if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { - switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) { + switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; ip_proto = IPPROTO_TCP; @@ -84,7 +84,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, return -EINVAL; } - if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) + if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN) gso_type |= SKB_GSO_TCP_ECN; if (hdr->gso_size == 0) @@ -122,7 +122,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!protocol) virtio_net_hdr_set_proto(skb, hdr); - else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type)) + else if (!virtio_net_hdr_match_proto(protocol, + hdr_gso_type)) return -EINVAL; else skb->protocol = protocol; @@ -153,7 +154,7 @@ retry: } } - if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); unsigned int nh_off = p_off; struct skb_shared_info *shinfo = skb_shinfo(skb); @@ -199,6 +200,13 @@ retry: return 0; } +static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + const struct virtio_net_hdr *hdr, + bool little_endian) +{ + return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type); +} + static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, struct virtio_net_hdr *hdr, bool little_endian, @@ -242,4 +250,177 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, return 0; } +static inline unsigned int virtio_l3min(bool is_ipv6) +{ + return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr); +} + +static inline int +virtio_net_hdr_tnl_to_skb(struct sk_buff *skb, + const struct virtio_net_hdr_v1_hash_tunnel *vhdr, + bool tnl_hdr_negotiated, + bool tnl_csum_negotiated, + bool little_endian) +{ + const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr; + unsigned int inner_nh, outer_th, inner_th; + unsigned int inner_l3min, outer_l3min; + u8 gso_inner_type, gso_tunnel_type; + bool outer_isv6, inner_isv6; + int ret; + + gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL; + if (!gso_tunnel_type) + return virtio_net_hdr_to_skb(skb, hdr, little_endian); + + /* Tunnel not supported/negotiated, but the hdr asks for it. */ + if (!tnl_hdr_negotiated) + return -EINVAL; + + /* Either ipv4 or ipv6. */ + if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL) + return -EINVAL; + + /* The UDP tunnel must carry a GSO packet, but no UFO. */ + gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN | + VIRTIO_NET_HDR_GSO_UDP_TUNNEL); + if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP) + return -EINVAL; + + /* Rely on csum being present. */ + if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) + return -EINVAL; + + /* Validate offsets. 
*/ + outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6; + inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6; + inner_l3min = virtio_l3min(inner_isv6); + outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6); + + inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start); + inner_nh = le16_to_cpu(vhdr->inner_nh_offset); + outer_th = le16_to_cpu(vhdr->outer_th_offset); + if (outer_th < outer_l3min || + inner_nh < outer_th + sizeof(struct udphdr) || + inner_th < inner_nh + inner_l3min) + return -EINVAL; + + /* Let the basic parsing deal with plain GSO features. */ + ret = __virtio_net_hdr_to_skb(skb, hdr, true, + hdr->gso_type & ~gso_tunnel_type); + if (ret) + return ret; + + /* In case of USO, the inner protocol is still unknown and + * `inner_isv6` is just a guess, additional parsing is needed. + * The previous validation ensures that accessing an ipv4 inner + * network header is safe. + */ + if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) { + struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh); + + inner_isv6 = iphdr->version == 6; + inner_l3min = virtio_l3min(inner_isv6); + if (inner_th < inner_nh + inner_l3min) + return -EINVAL; + } + + skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) : + htons(ETH_P_IP)); + if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) { + if (!tnl_csum_negotiated) + return -EINVAL; + + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + } else { + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } + + skb->inner_transport_header = inner_th + skb_headroom(skb); + skb->inner_network_header = inner_nh + skb_headroom(skb); + skb->inner_mac_header = inner_nh + skb_headroom(skb); + skb->transport_header = outer_th + skb_headroom(skb); + skb->encapsulation = 1; + return 0; +} + +/* Checksum-related fields validation for the driver */ +static inline int virtio_net_handle_csum_offload(struct sk_buff *skb, + struct virtio_net_hdr *hdr, + bool tnl_csum_negotiated) +{ + if (!(hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL)) { + if (!(hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)) + return 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (!(hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM)) + return 0; + + /* tunnel csum packets are invalid when the related + * feature has not been negotiated + */ + if (!tnl_csum_negotiated) + return -EINVAL; + skb->csum_level = 1; + return 0; + } + + /* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO + * over UDP tunnel requires the latter + */ + if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) + return -EINVAL; + return 0; +} + +/* + * vlan_hlen always refers to the outermost MAC header. That also + * means it refers to the only MAC header, if the packet does not carry + * any encapsulation. + */ +static inline int +virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb, + struct virtio_net_hdr_v1_hash_tunnel *vhdr, + bool tnl_hdr_negotiated, + bool little_endian, + int vlan_hlen) +{ + struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr; + unsigned int inner_nh, outer_th; + int tnl_gso_type; + int ret; + + tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM); + if (!tnl_gso_type) + return virtio_net_hdr_from_skb(skb, hdr, little_endian, false, + vlan_hlen); + + /* Tunnel support not negotiated but skb ask for it. */ + if (!tnl_hdr_negotiated) + return -EINVAL; + + /* Let the basic parsing deal with plain GSO features. 
*/ + skb_shinfo(skb)->gso_type &= ~tnl_gso_type; + ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen); + skb_shinfo(skb)->gso_type |= tnl_gso_type; + if (ret) + return ret; + + if (skb->protocol == htons(ETH_P_IPV6)) + hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6; + else + hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) + hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM; + + inner_nh = skb->inner_network_header - skb_headroom(skb); + outer_th = skb->transport_header - skb_headroom(skb); + vhdr->inner_nh_offset = cpu_to_le16(inner_nh); + vhdr->outer_th_offset = cpu_to_le16(outer_th); + return 0; +} + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index c0b1b1ca1163..48bc12d1045b 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -3,6 +3,7 @@ #define _LINUX_VIRTIO_PCI_MODERN_H #include <linux/pci.h> +#include <linux/virtio_config.h> #include <linux/virtio_pci.h> /** @@ -95,10 +96,44 @@ static inline void vp_iowrite64_twopart(u64 val, vp_iowrite32(val >> 32, hi); } -u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); -u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev); -void vp_modern_set_features(struct virtio_pci_modern_device *mdev, - u64 features); +void +vp_modern_get_driver_extended_features(struct virtio_pci_modern_device *mdev, + u64 *features); +void vp_modern_get_extended_features(struct virtio_pci_modern_device *mdev, + u64 *features); +void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev, + const u64 *features); + +static inline u64 +vp_modern_get_features(struct virtio_pci_modern_device *mdev) +{ + u64 features_array[VIRTIO_FEATURES_DWORDS]; + + vp_modern_get_extended_features(mdev, features_array); + return features_array[0]; +} + +static inline u64 +vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev) +{ + u64 features_array[VIRTIO_FEATURES_DWORDS]; + int i; + + vp_modern_get_driver_extended_features(mdev, features_array); + for (i = 1; i < VIRTIO_FEATURES_DWORDS; ++i) + WARN_ON_ONCE(features_array[i]); + return features_array[0]; +} + +static inline void +vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features) +{ + u64 features_array[VIRTIO_FEATURES_DWORDS]; + + virtio_features_from_u64(features_array, features); + vp_modern_set_extended_features(mdev, features_array); +} + u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev); void vp_modern_set_status(struct virtio_pci_modern_device *mdev, diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index f28907345c80..41764a684423 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h @@ -35,7 +35,6 @@ int vmci_doorbell_create(struct vmci_handle *handle, u32 flags, u32 priv_flags, vmci_callback notify_cb, void *client_data); int vmci_doorbell_destroy(struct vmci_handle handle); -int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags); u32 vmci_get_context_id(void); bool vmci_is_context_owner(u32 context_id, kuid_t uid); int vmci_register_vsock_callback(vmci_vsock_cb callback); @@ -61,12 +60,6 @@ s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair); s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair); s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair); s64 vmci_qpair_consume_buf_ready(const 
struct vmci_qp *qpair); -ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, - const void *buf, size_t buf_size, int mode); -ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, - void *buf, size_t buf_size, int mode); -ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, - int mode); ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, diff --git a/include/linux/wait.h b/include/linux/wait.h index 965a19809c7e..09855d819418 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -164,6 +164,8 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); +extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head, + struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
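As a closing illustration, the offset checks in virtio_net_hdr_tnl_to_skb() above boil down to three ordering constraints: the outer transport header must sit behind an Ethernet plus minimal outer L3 header, the inner network header must start past the outer UDP header, and the inner transport header must start past a minimal inner L3 header. A small userspace rendition of just that arithmetic (header lengths hard-coded, function names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN 14
#define IP4_HLEN 20	/* sizeof(struct iphdr) */
#define IP6_HLEN 40	/* sizeof(struct ipv6hdr) */
#define UDP_HLEN 8	/* sizeof(struct udphdr) */

/* Mirror of virtio_l3min(): minimal L3 header for the address family. */
static unsigned int l3min(bool is_ipv6)
{
	return is_ipv6 ? IP6_HLEN : IP4_HLEN;
}

/* Complement of the -EINVAL offset test in virtio_net_hdr_tnl_to_skb(). */
static bool tnl_offsets_valid(unsigned int outer_th, unsigned int inner_nh,
			      unsigned int inner_th,
			      bool outer_isv6, bool inner_isv6)
{
	return outer_th >= ETH_HLEN + l3min(outer_isv6) &&
	       inner_nh >= outer_th + UDP_HLEN &&
	       inner_th >= inner_nh + l3min(inner_isv6);
}

int main(void)
{
	/* Plausible IPv4-in-IPv4 offsets: ETH+IP4 = 34, UDP + 8-byte shim. */
	printf("%d\n", tnl_offsets_valid(34, 50, 70, false, false)); /* 1 */
	/* Inner transport header overlapping the inner IP header: invalid. */
	printf("%d\n", tnl_offsets_valid(34, 50, 60, false, false)); /* 0 */
	return 0;
}

Note that for VIRTIO_NET_HDR_GSO_UDP_L4 the kernel helper treats inner_isv6 as a guess, re-derives the inner family from the IP version field after the basic parsing, and re-checks inner_th against the corrected minimum.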