Diffstat (limited to 'drivers/net/ethernet/ibm/ibmvnic.h')
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 103
1 file changed, 81 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 22df602323bc..480dc587078f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -18,8 +18,6 @@
 #define IBMVNIC_NAME "ibmvnic"
 #define IBMVNIC_DRIVER_VERSION "1.0.1"
 #define IBMVNIC_INVALID_MAP -1
-#define IBMVNIC_STATS_TIMEOUT 1
-#define IBMVNIC_INIT_FAILED 2
 #define IBMVNIC_OPEN_FAILED 3
 
 /* basic structures plus 100 2k buffers */
@@ -31,23 +29,60 @@
 #define IBMVNIC_BUFFS_PER_POOL 100
 #define IBMVNIC_MAX_QUEUES 16
 #define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 16
-#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
+#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_SAFE_IND_DESC 16
+#define IBMVNIC_IND_MAX_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
 
 #define IBMVNIC_TSO_BUF_SZ 65536
 #define IBMVNIC_TSO_BUFS 64
 #define IBMVNIC_TSO_POOL_MASK 0x80000000
 
-#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
-#define IBMVNIC_BUFFER_HLEN 500
+/* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
+ * has a set of buffers. The size of each buffer is determined by the MTU.
+ *
+ * Each Rx/Tx pool is also associated with a DMA region that is shared
+ * with the "hardware" (VIOS) and used to send/receive packets. The DMA
+ * region is also referred to as a Long Term Buffer or LTB.
+ *
+ * The size of the DMA region required for an Rx/Tx pool depends on the
+ * number and size (MTU) of the buffers in the pool. At the max levels
+ * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
+ * some padding.
+ *
+ * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
+ * kernel (about 16MB currently). To support say 4K Jumbo frames, we
+ * use a set of LTBs (struct ltb_set) per pool.
+ *
+ * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
+ * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
+ *                        (must be <= IBMVNIC_ONE_LTB_MAX)
+ * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
+ *
+ * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
+ * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
+ *
+ * The Rx and Tx pools can have upto 4096 buffers. The max size of these
+ * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
+ * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
+ *
+ * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
+ * the allocation of the LTB can fail when system is low in memory. If
+ * its too small, we would need several mappings for each of the Rx/
+ * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
+ * VNIC protocol.
+ *
+ * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
+ * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
+ * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
+ * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
+ */
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
+#define IBMVNIC_LTB_SET_SIZE (38 << 20)
 
+#define IBMVNIC_BUFFER_HLEN 500
 #define IBMVNIC_RESET_DELAY 100
 
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
-	"use-server-maxes"
-};
-
 struct ibmvnic_login_buffer {
 	__be32 len;
 	__be32 version;
@@ -177,20 +212,25 @@ struct ibmvnic_statistics {
 	u8 reserved[72];
 } __packed __aligned(8);
 
-#define NUM_TX_STATS 3
 struct ibmvnic_tx_queue_stats {
-	u64 packets;
+	u64 batched_packets;
+	u64 direct_packets;
 	u64 bytes;
 	u64 dropped_packets;
 };
 
-#define NUM_RX_STATS 3
+#define NUM_TX_STATS \
+	(sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
+
 struct ibmvnic_rx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 interrupts;
 };
 
+#define NUM_RX_STATS \
+	(sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
+
 struct ibmvnic_acl_buffer {
 	__be32 len;
 	__be32 version;
@@ -791,6 +831,7 @@ struct ibmvnic_sub_crq_queue {
 	atomic_t used;
 	char name[32];
 	u64 handle;
+	cpumask_var_t affinity_mask;
 } ____cacheline_aligned;
 
 struct ibmvnic_long_term_buff {
@@ -800,6 +841,11 @@ struct ibmvnic_long_term_buff {
 	u8 map_id;
 };
 
+struct ibmvnic_ltb_set {
+	int num_ltbs;
+	struct ibmvnic_long_term_buff *ltbs;
+};
+
 struct ibmvnic_tx_buff {
 	struct sk_buff *skb;
 	int index;
@@ -812,7 +858,7 @@ struct ibmvnic_tx_pool {
 	int *free_map;
 	int consumer_index;
 	int producer_index;
-	struct ibmvnic_long_term_buff long_term_buff;
+	struct ibmvnic_ltb_set ltb_set;
 	int num_buffers;
 	int buf_size;
 } ____cacheline_aligned;
@@ -827,7 +873,7 @@ struct ibmvnic_rx_buff {
 
 struct ibmvnic_rx_pool {
 	struct ibmvnic_rx_buff *rx_buff;
-	int size;
+	int size;			/* # of buffers in the pool */
 	int index;
 	int buff_size;
 	atomic_t available;
@@ -835,7 +881,7 @@ struct ibmvnic_rx_pool {
 	int next_free;
 	int next_alloc;
 	int active;
-	struct ibmvnic_long_term_buff long_term_buff;
+	struct ibmvnic_ltb_set ltb_set;
 } ____cacheline_aligned;
 
 struct ibmvnic_vpd {
@@ -885,7 +931,7 @@ struct ibmvnic_adapter {
 	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
 	dma_addr_t ip_offload_ctrl_tok;
 	u32 msg_enable;
-	u32 priv_flags;
+	u32 cur_max_ind_descs;
 
 	/* Vital Product Data (VPD) */
 	struct ibmvnic_vpd *vpd;
@@ -921,7 +967,6 @@ struct ibmvnic_adapter {
 	int login_rsp_buf_sz;
 
 	atomic_t running_cap_crqs;
-	bool wait_capability;
 
 	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
 	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
@@ -933,6 +978,7 @@ struct ibmvnic_adapter {
 	struct ibmvnic_tx_pool *tx_pool;
 	struct ibmvnic_tx_pool *tso_pool;
 
+	struct completion probe_done;
 	struct completion init_done;
 	int init_done_rc;
 
@@ -945,6 +991,10 @@ struct ibmvnic_adapter {
 	int reset_done_rc;
 	bool wait_for_reset;
 
+	/* CPU hotplug instances for online & dead */
+	struct hlist_node node;
+	struct hlist_node node_dead;
+
 	/* partner capabilities */
 	u64 min_tx_queues;
 	u64 min_rx_queues;
@@ -967,6 +1017,7 @@ struct ibmvnic_adapter {
 	u64 min_mtu;
 	u64 max_mtu;
 	u64 req_mtu;
+	u64 prev_mtu;
 	u64 max_multicast_filters;
 	u64 vlan_header_insertion;
 	u64 rx_vlan_header_insertion;
@@ -979,13 +1030,18 @@ struct ibmvnic_adapter {
 	u64 opt_tx_entries_per_subcrq;
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
-	u8 map_id;
+#define MAX_MAP_ID 255
+	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
 	u32 num_active_rx_scrqs;
 	u32 num_active_rx_pools;
 	u32 num_active_rx_napi;
 	u32 num_active_tx_scrqs;
 	u32 num_active_tx_pools;
+
+	u32 prev_rx_pool_size;
+	u32 prev_tx_pool_size;
 	u32 cur_rx_buf_sz;
+	u32 prev_rx_buf_sz;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
@@ -1002,11 +1058,14 @@ struct ibmvnic_adapter {
 	struct work_struct ibmvnic_reset;
 	struct delayed_work ibmvnic_delayed_reset;
 	unsigned long resetting;
-	bool napi_enabled, from_passive_init;
-	bool login_pending;
 	/* last device reset time */
 	unsigned long last_reset_time;
 
+	bool napi_enabled;
+	bool from_passive_init;
+	bool login_pending;
+	/* protected by rcu */
+	bool tx_queues_active;
 	bool failover_pending;
 	bool force_reset_recovery;
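
The sizing arithmetic in the LTB comment added above can be checked independently. The standalone C sketch below is illustrative only: it mirrors IBMVNIC_ONE_LTB_SIZE, IBMVNIC_LTB_SET_SIZE, the 4MB TSO pool and the 16-queue maximum as local constants rather than including the driver headers, and it reproduces the figures quoted in the comment (5 LTBs per Rx/Tx pool, 1 per TSO pool, 176 LTB mappings against the 255-mapping limit). The worst-case pool payload of 4096 * 9588 bytes is about 37.5MB, which is what the 38MB IBMVNIC_LTB_SET_SIZE rounds up to.

#include <stdio.h>

/* Local stand-ins for the macros added in this diff; not driver code. */
#define ONE_LTB_SIZE	(8u << 20)	/* IBMVNIC_ONE_LTB_SIZE, 8MB */
#define LTB_SET_SIZE	(38u << 20)	/* IBMVNIC_LTB_SET_SIZE, 38MB */
#define TSO_POOL_SIZE	(65536u * 64u)	/* IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS, 4MB */
#define MAX_QUEUES	16u		/* IBMVNIC_MAX_QUEUES */
#define MAX_MAP_ID	255u

/* Round up: how many LTBs of ltb_size are needed to cover set_size bytes. */
static unsigned int ltbs_needed(unsigned int set_size, unsigned int ltb_size)
{
	return (set_size + ltb_size - 1) / ltb_size;
}

int main(void)
{
	unsigned int max_bufs = 4096, buf_size = 9588;	/* jumbo MTU + IBMVNIC_BUFFER_HLEN */
	unsigned int per_pool = ltbs_needed(LTB_SET_SIZE, ONE_LTB_SIZE);
	unsigned int per_tso = ltbs_needed(TSO_POOL_SIZE, ONE_LTB_SIZE);
	unsigned int total = MAX_QUEUES * per_pool +	/* Rx pools */
			     MAX_QUEUES * per_pool +	/* Tx pools */
			     MAX_QUEUES * per_tso;	/* TSO pools */

	printf("worst-case pool payload: %u bytes (~%u MB)\n",
	       max_bufs * buf_size, (max_bufs * buf_size) >> 20);
	printf("LTBs per Rx/Tx pool: %u, per TSO pool: %u\n", per_pool, per_tso);
	printf("total LTB mappings: %u (protocol limit %u)\n", total, MAX_MAP_ID);
	return 0;
}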
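
The stats hunk replaces the hard-coded NUM_TX_STATS/NUM_RX_STATS value of 3 with a count derived from the structure size, so splitting the Tx packet counter into batched_packets and direct_packets cannot leave the count stale. A minimal userspace sketch of the same pattern follows; it holds only under the same assumption the header makes, namely that every member of the stats structure is a u64.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the new ibmvnic_tx_queue_stats layout, using uint64_t for u64. */
struct tx_queue_stats {
	uint64_t batched_packets;
	uint64_t direct_packets;
	uint64_t bytes;
	uint64_t dropped_packets;
};

/* Derived count: adding a field automatically bumps this to match. */
#define NUM_TX_STATS (sizeof(struct tx_queue_stats) / sizeof(uint64_t))

int main(void)
{
	/* Treat the struct as an array of u64 counters, the same kind of
	 * walk an ethtool stats dump can do over the fields in order.
	 */
	struct tx_queue_stats s = { .batched_packets = 10, .bytes = 1500 };
	const uint64_t *vals = (const uint64_t *)&s;

	for (size_t i = 0; i < NUM_TX_STATS; i++)
		printf("stat[%zu] = %llu\n", i, (unsigned long long)vals[i]);
	return 0;
}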
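
Because struct ibmvnic_ltb_set replaces the single long_term_buff in both pool types, a buffer index within a pool now has to be translated into a particular LTB plus an offset inside it. The sketch below shows one plausible translation, assuming (as the sizing comment implies) that a buffer never straddles two LTBs; the types and helper are simplified stand-ins, not the driver's own code.

#include <stdio.h>

/* Simplified stand-ins for struct ibmvnic_long_term_buff / ibmvnic_ltb_set. */
struct ltb {
	unsigned int size;	/* bytes allocated for this LTB */
};

struct ltb_set {
	int num_ltbs;
	struct ltb *ltbs;
};

/* Translate pool buffer index 'bufidx' (buffers of 'buf_size' bytes) into
 * an LTB index and a byte offset, assuming each LTB holds a whole number
 * of buffers so no buffer straddles two LTBs.
 */
static int buf_to_ltb(const struct ltb_set *set, unsigned int buf_size,
		      unsigned int bufidx, unsigned int *offset)
{
	for (int i = 0; i < set->num_ltbs; i++) {
		unsigned int nbufs = set->ltbs[i].size / buf_size;

		if (bufidx < nbufs) {
			*offset = bufidx * buf_size;
			return i;
		}
		bufidx -= nbufs;
	}
	return -1;	/* index beyond the pool */
}

int main(void)
{
	struct ltb ltbs[] = { { 8u << 20 }, { 8u << 20 }, { 8u << 20 } };
	struct ltb_set set = { 3, ltbs };
	unsigned int off;
	int idx = buf_to_ltb(&set, 9588, 1000, &off);

	printf("buffer 1000 -> ltb %d, offset %u\n", idx, off);
	return 0;
}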
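
Replacing the adapter's u8 map_id counter with DECLARE_BITMAP(map_ids, MAX_MAP_ID) lets map IDs be reused after an LTB is freed, which matters now that one adapter can hold up to 176 of the 255 available mappings. In the kernel this pairs with find_first_zero_bit(), bitmap_set() and bitmap_clear(); the plain C sketch below only imitates that allocate/release pattern, its helper names are illustrative, and whether ID 0 is reserved is a driver detail not visible in this diff, so it simply hands out IDs starting at 1.

#include <stdbool.h>
#include <stdio.h>

#define MAX_MAP_ID 255

static bool map_ids[MAX_MAP_ID];	/* slot i tracks map ID i + 1 */

static int alloc_map_id(void)
{
	for (int i = 0; i < MAX_MAP_ID; i++) {
		if (!map_ids[i]) {
			map_ids[i] = true;
			return i + 1;	/* hand out IDs starting at 1 */
		}
	}
	return -1;			/* all mappings in use */
}

static void free_map_id(int id)
{
	if (id >= 1 && id <= MAX_MAP_ID)
		map_ids[id - 1] = false;
}

int main(void)
{
	int a = alloc_map_id();		/* 1 */
	int b = alloc_map_id();		/* 2 */

	free_map_id(a);
	printf("reused id: %d\n", alloc_map_id());	/* 1 again */
	printf("next id:   %d\n", alloc_map_id());	/* 3 */
	(void)b;
	return 0;
}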
