// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "wx_type.h"
#include "wx_hw.h"
#include "wx_mbx.h"
#include "wx_vf.h"
static void wx_virt_clr_reg(struct wx *wx)
{
u32 vfsrrctl, i;
/* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
vfsrrctl = WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
vfsrrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
/* clear all rxd ctl */
for (i = 0; i < WX_VF_MAX_RING_NUMS; i++)
wr32m(wx, WX_VXRXDCTL(i),
WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK,
vfsrrctl);
rd32(wx, WX_VXSTATUS);
}
/**
* wx_init_hw_vf - virtual function hardware initialization
* @wx: pointer to hardware structure
*
* Initialize the mac address
**/
void wx_init_hw_vf(struct wx *wx)
{
wx_get_mac_addr_vf(wx, wx->mac.addr);
}
EXPORT_SYMBOL(wx_init_hw_vf);
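/* Post a request message to the PF mailbox and wait for the reply,
 * leaving the response in resp_buf. Returns 0 on success or a
 * negative error code on failure.
 */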
static int wx_mbx_write_and_read_reply(struct wx *wx, u32 *req_buf,
u32 *resp_buf, u16 size)
{
int ret;
ret = wx_write_posted_mbx(wx, req_buf, size);
if (ret)
return ret;
return wx_read_posted_mbx(wx, resp_buf, size);
}
/**
* wx_reset_hw_vf - Performs hardware reset
* @wx: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks and
* clears all interrupts.
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_reset_hw_vf(struct wx *wx)
{
struct wx_mbx_info *mbx = &wx->mbx;
u32 msgbuf[4] = {WX_VF_RESET};
u8 *addr = (u8 *)(&msgbuf[1]);
u32 b4_buf[16] = {0};
u32 timeout = 200;
int ret;
u32 i;
/* Call wx stop to disable tx/rx and clear interrupts */
wx_stop_adapter_vf(wx);
/* reset the api version */
wx->vfinfo->vf_api = wx_mbox_api_null;
/* backup msix vectors */
if (wx->b4_addr) {
for (i = 0; i < 16; i++)
b4_buf[i] = readl(wx->b4_addr + i * 4);
}
wr32m(wx, WX_VXCTRL, WX_VXCTRL_RST, WX_VXCTRL_RST);
rd32(wx, WX_VXSTATUS);
/* we cannot reset while the RSTI / RSTD bits are asserted */
while (!wx_check_for_rst_vf(wx) && timeout) {
timeout--;
udelay(5);
}
/* restore msix vectors */
if (wx->b4_addr) {
for (i = 0; i < 16; i++)
writel(b4_buf[i], wx->b4_addr + i * 4);
}
/* amlite: bme */
if (wx->mac.type == wx_mac_aml || wx->mac.type == wx_mac_aml40)
wr32(wx, WX_VX_PF_BME, WX_VF_BME_ENABLE);
if (!timeout)
return -EBUSY;
/* Reset VF registers to initial values */
wx_virt_clr_reg(wx);
/* mailbox timeout can now become active */
mbx->timeout = 2000;
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
if (msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_ACK) &&
msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_NACK))
return -EINVAL;
if (msgbuf[0] == (WX_VF_RESET | WX_VT_MSGTYPE_ACK))
ether_addr_copy(wx->mac.perm_addr, addr);
wx->mac.mc_filter_type = msgbuf[3];
return 0;
}
EXPORT_SYMBOL(wx_reset_hw_vf);
/**
* wx_stop_adapter_vf - Generic stop Tx/Rx units
* @wx: pointer to hardware structure
*
* Clears interrupts, disables transmit and receive units.
**/
void wx_stop_adapter_vf(struct wx *wx)
{
u32 reg_val;
u16 i;
	/* Clear interrupt mask to stop interrupts from being generated */
wr32(wx, WX_VXIMS, WX_VF_IRQ_CLEAR_MASK);
/* Clear any pending interrupts, flush previous writes */
wr32(wx, WX_VXICR, U32_MAX);
/* Disable the transmit unit. Each queue must be disabled. */
for (i = 0; i < wx->mac.max_tx_queues; i++)
wr32(wx, WX_VXTXDCTL(i), WX_VXTXDCTL_FLUSH);
/* Disable the receive unit by stopping each queue */
for (i = 0; i < wx->mac.max_rx_queues; i++) {
reg_val = rd32(wx, WX_VXRXDCTL(i));
reg_val &= ~WX_VXRXDCTL_ENABLE;
wr32(wx, WX_VXRXDCTL(i), reg_val);
}
/* Clear packet split and pool config */
wr32(wx, WX_VXMRQC, 0);
	/* flush the disabling of all queues */
rd32(wx, WX_VXSTATUS);
}
EXPORT_SYMBOL(wx_stop_adapter_vf);
/**
* wx_set_rar_vf - set device MAC address
* @wx: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @enable_addr: set flag that address is active
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr)
{
u32 msgbuf[3] = {WX_VF_SET_MAC_ADDR};
u8 *msg_addr = (u8 *)(&msgbuf[1]);
int ret;
memcpy(msg_addr, addr, ETH_ALEN);
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (msgbuf[0] == (WX_VF_SET_MAC_ADDR | WX_VT_MSGTYPE_NACK)) {
wx_get_mac_addr_vf(wx, wx->mac.addr);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(wx_set_rar_vf);
/**
* wx_update_mc_addr_list_vf - Update Multicast addresses
* @wx: pointer to the HW structure
* @netdev: pointer to the net device structure
*
* Updates the Multicast Table Array.
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev)
{
u32 msgbuf[WX_VXMAILBOX_SIZE] = {WX_VF_SET_MULTICAST};
u16 *vector_l = (u16 *)&msgbuf[1];
struct netdev_hw_addr *ha;
u32 cnt, i;
cnt = netdev_mc_count(netdev);
if (cnt > 28)
cnt = 28;
msgbuf[0] |= cnt << WX_VT_MSGINFO_SHIFT;
i = 0;
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
if (is_link_local_ether_addr(ha->addr))
continue;
vector_l[i++] = wx_mta_vector(wx, ha->addr);
}
return wx_write_posted_mbx(wx, msgbuf, ARRAY_SIZE(msgbuf));
}
EXPORT_SYMBOL(wx_update_mc_addr_list_vf);
/**
* wx_update_xcast_mode_vf - Update Multicast mode
* @wx: pointer to the HW structure
* @xcast_mode: new multicast mode
*
* Updates the Multicast Mode of VF.
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode)
{
u32 msgbuf[2] = {WX_VF_UPDATE_XCAST_MODE, xcast_mode};
int ret = 0;
if (wx->vfinfo->vf_api < wx_mbox_api_13)
return -EINVAL;
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
if (msgbuf[0] == (WX_VF_UPDATE_XCAST_MODE | WX_VT_MSGTYPE_NACK))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(wx_update_xcast_mode_vf);
/**
* wx_get_link_state_vf - Get VF link state from PF
* @wx: pointer to the HW structure
* @link_state: link state storage
*
 * Return: returns 0 on success, negative error code on failure
**/
int wx_get_link_state_vf(struct wx *wx, u16 *link_state)
{
u32 msgbuf[2] = {WX_VF_GET_LINK_STATE};
int ret;
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
return -EINVAL;
*link_state = msgbuf[1];
return 0;
}
EXPORT_SYMBOL(wx_get_link_state_vf);
/**
* wx_set_vfta_vf - Set/Unset vlan filter table address
* @wx: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
* @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified VLAN in the VLAN filter table.
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
bool vlvf_bypass)
{
u32 msgbuf[2] = {WX_VF_SET_VLAN, vlan};
bool vlan_offload = false;
int ret;
	/* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
msgbuf[0] |= vlan_on << WX_VT_MSGINFO_SHIFT;
	/* if VF VLAN offload is disabled, allow VLANs under the PF port VLAN */
msgbuf[0] |= BIT(vlan_offload);
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
if (msgbuf[0] & WX_VT_MSGTYPE_ACK)
return 0;
return msgbuf[0] & WX_VT_MSGTYPE_NACK;
}
EXPORT_SYMBOL(wx_set_vfta_vf);
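/**
 * wx_get_mac_addr_vf - Read device MAC address
 * @wx: pointer to hardware structure
 * @mac_addr: storage for the MAC address
 *
 * Copies the permanent MAC address assigned by the PF into @mac_addr.
 **/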
void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr)
{
ether_addr_copy(mac_addr, wx->mac.perm_addr);
}
EXPORT_SYMBOL(wx_get_mac_addr_vf);
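/**
 * wx_get_fw_version_vf - Get firmware version from the PF
 * @wx: pointer to hardware structure
 *
 * Requests the firmware version over the mailbox and stores it in
 * wx->eeprom_id.
 *
 * Return: returns 0 on success, negative error code on failure
 **/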
int wx_get_fw_version_vf(struct wx *wx)
{
u32 msgbuf[2] = {WX_VF_GET_FW_VERSION};
int ret;
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
return -EINVAL;
snprintf(wx->eeprom_id, 32, "0x%08x", msgbuf[1]);
return 0;
}
EXPORT_SYMBOL(wx_get_fw_version_vf);
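/**
 * wx_set_uc_addr_vf - Set or clear a unicast MAC filter
 * @wx: pointer to hardware structure
 * @index: 1 marks the start of a new macvlan list, 0 clears the list
 * @addr: MAC address to program, may be NULL when clearing
 *
 * Return: returns 0 on success, negative error code on failure
 **/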
int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr)
{
u32 msgbuf[3] = {WX_VF_SET_MACVLAN};
u8 *msg_addr = (u8 *)(&msgbuf[1]);
int ret;
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
msgbuf[0] |= index << WX_VT_MSGINFO_SHIFT;
if (addr)
		memcpy(msg_addr, addr, ETH_ALEN);
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
if (msgbuf[0] == (WX_VF_SET_MACVLAN | WX_VT_MSGTYPE_NACK))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(wx_set_uc_addr_vf);
/**
* wx_rlpml_set_vf - Set the maximum receive packet length
* @wx: pointer to the HW structure
* @max_size: value to assign to max frame size
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_rlpml_set_vf(struct wx *wx, u16 max_size)
{
u32 msgbuf[2] = {WX_VF_SET_LPE, max_size};
int ret;
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
if ((msgbuf[0] & WX_VF_SET_LPE) &&
(msgbuf[0] & WX_VT_MSGTYPE_NACK))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(wx_rlpml_set_vf);
/**
* wx_negotiate_api_version - Negotiate supported API version
* @wx: pointer to the HW structure
* @api: integer containing requested API version
*
* Return: returns 0 on success, negative error code on failure
**/
int wx_negotiate_api_version(struct wx *wx, int api)
{
u32 msgbuf[2] = {WX_VF_API_NEGOTIATE, api};
int ret;
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
if (msgbuf[0] == (WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_NACK))
return -EINVAL;
wx->vfinfo->vf_api = api;
return 0;
}
EXPORT_SYMBOL(wx_negotiate_api_version);
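/**
 * wx_get_queues_vf - Fetch queue configuration from the PF
 * @wx: pointer to hardware structure
 * @num_tcs: storage for number of traffic classes
 * @default_tc: storage for the default queue
 *
 * Reads the maximum Tx/Rx queue counts, the number of traffic classes
 * and the default queue from the PF and clamps them to sane values.
 *
 * Return: returns 0 on success, negative error code on failure
 **/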
int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc)
{
u32 msgbuf[5] = {WX_VF_GET_QUEUES};
int ret;
	/* return an error if the API doesn't support wx_get_queues */
if (wx->vfinfo->vf_api < wx_mbox_api_13)
return -EINVAL;
/* Fetch queue configuration from the PF */
ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (ret)
return ret;
msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
/* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
if (msgbuf[0] != (WX_VF_GET_QUEUES | WX_VT_MSGTYPE_ACK))
return -EINVAL;
/* record and validate values from message */
wx->mac.max_tx_queues = msgbuf[WX_VF_TX_QUEUES];
if (wx->mac.max_tx_queues == 0 ||
wx->mac.max_tx_queues > WX_VF_MAX_TX_QUEUES)
wx->mac.max_tx_queues = WX_VF_MAX_TX_QUEUES;
wx->mac.max_rx_queues = msgbuf[WX_VF_RX_QUEUES];
if (wx->mac.max_rx_queues == 0 ||
wx->mac.max_rx_queues > WX_VF_MAX_RX_QUEUES)
wx->mac.max_rx_queues = WX_VF_MAX_RX_QUEUES;
*num_tcs = msgbuf[WX_VF_TRANS_VLAN];
/* in case of unknown state assume we cannot tag frames */
if (*num_tcs > wx->mac.max_rx_queues)
*num_tcs = 1;
*default_tc = msgbuf[WX_VF_DEF_QUEUE];
/* default to queue 0 on out-of-bounds queue number */
if (*default_tc >= wx->mac.max_tx_queues)
*default_tc = 0;
return 0;
}
EXPORT_SYMBOL(wx_get_queues_vf);
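/* Decode a link status message from the PF and update the VF's
 * notify_down, link and speed fields accordingly.
 */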
static int wx_get_link_status_from_pf(struct wx *wx, u32 *msgbuf)
{
u32 links_reg = msgbuf[1];
if (msgbuf[1] & WX_PF_NOFITY_VF_NET_NOT_RUNNING)
wx->notify_down = true;
else
wx->notify_down = false;
if (wx->notify_down) {
wx->link = false;
wx->speed = SPEED_UNKNOWN;
return 0;
}
wx->link = WX_PFLINK_STATUS(links_reg);
wx->speed = WX_PFLINK_SPEED(links_reg);
return 0;
}
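/* Handle a PF control (ping) message; a message without the CTS bit
 * set indicates the PF is resetting, so return an error so the caller
 * knows a VF reset is needed.
 */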
static int wx_pf_ping_vf(struct wx *wx, u32 *msgbuf)
{
if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS))
/* msg is not CTS, we need to do reset */
return -EINVAL;
return 0;
}
static struct wx_link_reg_fields wx_speed_lookup_vf[] = {
{wx_mac_unknown},
{wx_mac_sp, SPEED_10000, SPEED_1000, SPEED_100, SPEED_UNKNOWN, SPEED_UNKNOWN},
{wx_mac_em, SPEED_1000, SPEED_100, SPEED_10, SPEED_UNKNOWN, SPEED_UNKNOWN},
{wx_mac_aml, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
{wx_mac_aml40, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
};
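/* Poll the VF status register for physical link and decode the speed
 * bits according to the MAC type.
 */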
static void wx_check_physical_link(struct wx *wx)
{
u32 val, link_val;
int ret;
/* get link status from hw status reg
* for SFP+ modules and DA cables, it can take up to 500usecs
* before the link status is correct
*/
if (wx->mac.type == wx_mac_em)
ret = read_poll_timeout_atomic(rd32, val, val & GENMASK(4, 1),
100, 500, false, wx, WX_VXSTATUS);
else
ret = read_poll_timeout_atomic(rd32, val, val & BIT(0), 100,
500, false, wx, WX_VXSTATUS);
if (ret) {
wx->speed = SPEED_UNKNOWN;
wx->link = false;
return;
}
wx->link = true;
link_val = WX_VXSTATUS_SPEED(val);
if (link_val & BIT(0))
wx->speed = wx_speed_lookup_vf[wx->mac.type].bit0_f;
else if (link_val & BIT(1))
wx->speed = wx_speed_lookup_vf[wx->mac.type].bit1_f;
else if (link_val & BIT(2))
wx->speed = wx_speed_lookup_vf[wx->mac.type].bit2_f;
else if (link_val & BIT(3))
wx->speed = wx_speed_lookup_vf[wx->mac.type].bit3_f;
else
wx->speed = SPEED_UNKNOWN;
}
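/**
 * wx_check_mac_link_vf - Update VF link and speed status
 * @wx: pointer to hardware structure
 *
 * Processes a pending PF mailbox message to update the link state, or
 * falls back to reading the physical link from the VF status register.
 *
 * Return: returns 0 on success, negative error code on failure
 **/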
int wx_check_mac_link_vf(struct wx *wx)
{
struct wx_mbx_info *mbx = &wx->mbx;
u32 msgbuf[2] = {0};
int ret = 0;
if (!mbx->timeout)
goto out;
wx_check_for_rst_vf(wx);
if (!wx_check_for_msg_vf(wx))
ret = wx_read_mbx_vf(wx, msgbuf, 2);
if (ret)
goto out;
switch (msgbuf[0] & GENMASK(8, 0)) {
case WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG:
ret = wx_get_link_status_from_pf(wx, msgbuf);
goto out;
case WX_PF_CONTROL_MSG:
ret = wx_pf_ping_vf(wx, msgbuf);
goto out;
case 0:
if (msgbuf[0] & WX_VT_MSGTYPE_NACK) {
/* msg is NACK, we must have lost CTS status */
ret = -EBUSY;
goto out;
}
/* no message, check link status */
wx_check_physical_link(wx);
goto out;
default:
break;
}
if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK, so we must have lost CTS status */
if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
ret = -EBUSY;
goto out;
}
/* the pf is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
ret = -EBUSY;
goto out;
}
out:
return ret;
}