1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
|
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2025 Broadcom.
#ifndef __BNG_RES_H__
#define __BNG_RES_H__
#include "roce_hsi.h"
/* Max time (seconds) to wait for a RoCE FW command to complete */
#define BNG_ROCE_FW_MAX_TIMEOUT 60
/* Number of (void *) slots that fit in one page */
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
/* Highest slot index within a single page of pointers */
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
/* Page number holding flat pointer index x */
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
/* Slot index of flat pointer index x within its page */
#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
/*
 * Wrap index @idx into the ring of @hwq.  Assumes hwq->max_elements is a
 * power of two (mask arithmetic).
 */
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
/*
 * Number of free slots in @hwq: capacity minus the wrapped distance
 * between producer and consumer.  Every use of the macro argument is
 * parenthesized so expressions (e.g. "&q") expand correctly.
 */
#define HWQ_FREE_SLOTS(hwq) ((hwq)->max_elements - \
			     ((HWQ_CMP((hwq)->prod, hwq)	\
			       - HWQ_CMP((hwq)->cons, hwq))	\
			      & ((hwq)->max_elements - 1)))
#define MAX_PBL_LVL_0_PGS 1
#define MAX_PBL_LVL_1_PGS 512
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
#define MAX_PBL_LVL_2_PGS (256 * 512)
#define MAX_PDL_LVL_SHIFT 9
#define BNG_RE_DBR_VALID (0x1UL << 26)
#define BNG_RE_DBR_EPOCH_SHIFT 24
#define BNG_RE_DBR_TOGGLE_SHIFT 25
#define BNG_MAX_TQM_ALLOC_REQ 48
/* Descriptor for a mapped region of a PCI BAR */
struct bng_re_reg_desc {
	u8 bar_id;			/* PCI BAR index */
	resource_size_t bar_base;	/* physical base address of the BAR */
	unsigned long offset;		/* byte offset of the region in the BAR */
	void __iomem *bar_reg;		/* MMIO-mapped virtual address of the region */
	size_t len;			/* length of the mapped region, bytes */
};
/* Per-queue doorbell state used by bng_re_ring_db() */
struct bng_re_db_info {
	void __iomem *db;	/* doorbell register written by bng_re_ring_db() */
	void __iomem *priv_db;	/* second doorbell register — role not visible here; confirm */
	struct bng_re_hwq *hwq;	/* queue whose cons index is published via @db */
	u32 xid;		/* queue id placed in the doorbell (masked by DBC_DBC_XID_MASK) */
	u32 max_slot;
	u32 flags;		/* epoch bits, see enum bng_re_db_info_flags_mask */
	u8 toggle;		/* written at BNG_RE_DBR_TOGGLE_SHIFT for CQ ARM doorbells */
};
/*
 * Bit positions/masks for bng_re_db_info.flags: bit 0 tracks the consumer
 * epoch, bit 1 the producer epoch.  The epoch bit flips on ring wrap
 * (see bng_re_hwq_incr_cons()).
 */
enum bng_re_db_info_flags_mask {
	BNG_RE_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
	BNG_RE_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
	BNG_RE_FLAG_EPOCH_CONS_MASK = 0x1UL,
	BNG_RE_FLAG_EPOCH_PROD_MASK = 0x2UL,
};
/*
 * Shifts that move the epoch flag into doorbell bit BNG_RE_DBR_EPOCH_SHIFT
 * (24).  CONS shifts flag bit 0 by 24; PROD shifts flag bit 1 (mask 0x2)
 * by 23 — both land on the same doorbell bit.
 */
enum bng_re_db_epoch_flag_shift {
	BNG_RE_DB_EPOCH_CONS_SHIFT = BNG_RE_DBR_EPOCH_SHIFT,
	BNG_RE_DB_EPOCH_PROD_SHIFT = (BNG_RE_DBR_EPOCH_SHIFT - 1),
};
/* Chip/firmware identification and limits cached from the HWRM interface */
struct bng_re_chip_ctx {
	u16 chip_num;			/* chip identifier */
	u16 hw_stats_size;		/* size of the HW stats context, bytes — confirm units */
	u64 hwrm_intf_ver;		/* HWRM interface version reported by FW */
	u16 hwrm_cmd_max_timeout;	/* max FW command timeout */
};
/* One level of a page buffer list: parallel arrays of CPU and DMA addresses */
struct bng_re_pbl {
	u32 pg_count;		/* number of pages at this level */
	u32 pg_size;		/* size of each page, bytes */
	void **pg_arr;		/* CPU virtual address per page */
	dma_addr_t *pg_map_arr;	/* DMA address per page (parallel to pg_arr) */
};
/* PBL indirection depth: 0 = single page, 1/2 = one/two levels of pointers */
enum bng_re_pbl_lvl {
	BNG_PBL_LVL_0,
	BNG_PBL_LVL_1,
	BNG_PBL_LVL_2,
	BNG_PBL_LVL_MAX
};
/* Kind of hardware queue being allocated (context memory vs. ring queue) */
enum bng_re_hwq_type {
	BNG_HWQ_TYPE_CTX,
	BNG_HWQ_TYPE_QUEUE
};
/* Scatter/gather page geometry for backing-store allocation */
struct bng_re_sg_info {
	u32 npages;	/* number of pages */
	u32 pgshft;	/* log2 of the page size */
	u32 pgsize;	/* page size in bytes */
	bool nopte;	/* skip PTE setup — exact semantics not visible here; confirm */
};
/* Creation parameters for bng_re_alloc_init_hwq() */
struct bng_re_hwq_attr {
	struct bng_re_res *res;		/* device resources (pdev, chip ctx) */
	struct bng_re_sg_info *sginfo;	/* page geometry for the queue memory */
	enum bng_re_hwq_type type;	/* context memory or ring queue */
	u32 depth;			/* number of elements */
	u32 stride;			/* size of one element, bytes */
	u32 aux_stride;			/* element size of the auxiliary area, if any */
	u32 aux_depth;			/* element count of the auxiliary area, if any */
};
/*
 * A hardware queue: DMA-backed ring memory described by up to three PBL
 * levels, plus producer/consumer indices.
 */
struct bng_re_hwq {
	struct pci_dev *pdev;
	/* lock to protect hwq */
	spinlock_t lock;
	struct bng_re_pbl pbl[BNG_PBL_LVL_MAX + 1];
	/* Valid values: 0, 1, 2 */
	enum bng_re_pbl_lvl level;
	/* PBL entries */
	void **pbl_ptr;
	/* PBL dma_addr */
	dma_addr_t *pbl_dma_ptr;
	u32 max_elements;	/* ring capacity; power of two (see HWQ_CMP) */
	u32 depth;		/* requested depth */
	u16 element_size;	/* bytes per queue element */
	u32 prod;		/* producer index */
	u32 cons;		/* consumer index */
	/* queue entry per page */
	u16 qe_ppg;
};
/* DMA buffer for a FW statistics context */
struct bng_re_stats {
	dma_addr_t dma_map;	/* DMA address of the stats buffer */
	void *dma;		/* CPU virtual address of the stats buffer */
	u32 size;		/* buffer size, bytes */
	u32 fw_id;		/* stats context id assigned by FW */
};
/* Top-level device resources shared by the allocation helpers below */
struct bng_re_res {
	struct pci_dev *pdev;
	struct bng_re_chip_ctx *cctx;	/* chip identification/limits */
	struct bng_re_dev_attr *dattr;	/* device attributes (declared elsewhere) */
};
/*
 * Return the CPU address of queue element @indx in @hwq.  If @pg is
 * non-NULL, it receives the address of the pbl_ptr[] slot holding that
 * element's page pointer (cast to u64).
 */
static inline void *bng_re_get_qe(struct bng_re_hwq *hwq,
				  u32 indx, u64 *pg)
{
	u32 page = indx / hwq->qe_ppg;

	if (pg)
		*pg = (u64)&hwq->pbl_ptr[page];
	return (void *)(hwq->pbl_ptr[page] +
			hwq->element_size * (indx % hwq->qe_ppg));
}
/*
 * Build a 64-bit doorbell value: high word = xid | path | type | valid,
 * low word = index with the toggle bit at BNG_RE_DBR_TOGGLE_SHIFT.
 */
#define BNG_RE_INIT_DBHDR(xid, type, indx, toggle) \
	(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
	(type) | BNG_RE_DBR_VALID) << 32) | (indx) | \
	(((u32)(toggle)) << (BNG_RE_DBR_TOGGLE_SHIFT)))
/*
 * Publish the queue's consumer index through the doorbell register.
 * CQ ARM doorbells additionally carry the toggle bit; all other types
 * ring with toggle = 0.
 */
static inline void bng_re_ring_db(struct bng_re_db_info *info,
				  u32 type)
{
	bool cq_arm = (type == DBC_DBC_TYPE_CQ_ARMALL ||
		       type == DBC_DBC_TYPE_CQ_ARMSE);
	u8 toggle = cq_arm ? info->toggle : 0;
	u32 epoch = (info->flags & BNG_RE_FLAG_EPOCH_CONS_MASK) <<
		    BNG_RE_DB_EPOCH_CONS_SHIFT;
	u32 indx = (info->hwq->cons & DBC_DBC_INDEX_MASK) | epoch;

	writeq(BNG_RE_INIT_DBHDR(info->xid, type, indx, toggle), info->db);
}
/*
 * Ring the notification-queue doorbell, optionally arming it.
 * @cctx is currently unused — presumably kept for chip-specific
 * doorbell handling; confirm before removing.
 */
static inline void bng_re_ring_nq_db(struct bng_re_db_info *info,
				     struct bng_re_chip_ctx *cctx,
				     bool arm)
{
	bng_re_ring_db(info, arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ);
}
/*
 * Advance the consumer index by @cnt.  On wrap-around, reduce it modulo
 * @max_elements and flip the consumer epoch bit in @dbinfo_flags so the
 * next doorbell carries the new epoch.
 */
static inline void bng_re_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
					u32 *dbinfo_flags)
{
	u32 next = *cons + cnt;

	if (next >= max_elements) {
		next %= max_elements;
		*dbinfo_flags ^= 1UL << BNG_RE_FLAG_EPOCH_CONS_SHIFT;
	}
	*cons = next;
}
/* True if FW reports support for the extended max-SRQ capability */
static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
{
	return (dev_cap_ext_flags_2 &
		CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED) != 0;
}
/* Free the PBL-backed memory of @hwq */
void bng_re_free_hwq(struct bng_re_res *res,
		     struct bng_re_hwq *hwq);
/* Allocate and initialize @hwq according to @hwq_attr; 0 on success */
int bng_re_alloc_init_hwq(struct bng_re_hwq *hwq,
			  struct bng_re_hwq_attr *hwq_attr);
/* Release the DMA stats buffer allocated by bng_re_alloc_stats_ctx_mem() */
void bng_re_free_stats_ctx_mem(struct pci_dev *pdev,
			       struct bng_re_stats *stats);
/* Allocate the DMA buffer for a FW stats context; 0 on success */
int bng_re_alloc_stats_ctx_mem(struct pci_dev *pdev,
			       struct bng_re_chip_ctx *cctx,
			       struct bng_re_stats *stats);
#endif
|