Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c')
-rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 479
1 file changed, 381 insertions(+), 98 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..3abd750a4bd7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
 #include "lmac_common.h"
 #include "rvu_reg.h"
 #include "rvu_trace.h"
+#include "rvu_npc_hash.h"
 
 struct cgx_evq_entry {
	struct list_head evq_node;
@@ -33,7 +34,7 @@ static struct _req_type __maybe_unused \
		return NULL; \
	req->hdr.sig = OTX2_MBOX_REQ_SIG; \
	req->hdr.id = _id; \
-	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \
	return req; \
 }
 
@@ -54,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
	return (cgx_features_get(cgxd) & feature);
 }
 
+#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
 /* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
 {
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
 }
@@ -70,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
	if (!pfmap)
		return -ENODEV;
	else
-		return find_first_bit(&pfmap, 16);
+		return find_first_bit(&pfmap,
+				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
 }
 
 static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -111,7 +114,7 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
-	if (p2x == CMR_P2X_SEL_NIX1)
+	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
 }
 
@@ -128,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
	if (!cgx_cnt_max)
		return 0;
 
-	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;
 
	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
-	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;
@@ -144,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
	/* Reverse map table */
-	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
-				  GFP_KERNEL);
+	rvu->cgxlmac2pf_map =
+		devm_kzalloc(rvu->dev,
+			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;
 
@@ -155,7 +159,9 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+			if (iter >= MAX_LMAC_COUNT)
+				continue;
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -228,13 +234,19 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
-	int err, pfid;
+	int pfid;
 
	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+	if (!pfmap) {
+		dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
+			event->cgx_id, event->lmac_id);
+		return;
+	}
 
	do {
-		pfid = find_first_bit(&pfmap, 16);
+		pfid = find_first_bit(&pfmap,
+				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);
 
		/* check if notification is enabled */
@@ -245,16 +257,24 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
			continue;
		}
 
+		mutex_lock(&rvu->mbox_lock);
+
		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
-		if (!msg)
+		if (!msg) {
+			mutex_unlock(&rvu->mbox_lock);
			continue;
+		}
+
		msg->link_info = *linfo;
-		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
-		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
-		if (err)
-			dev_warn(rvu->dev, "notification to pf %d failed\n",
-				 pfid);
+
+		otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
+
+		otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+
+		otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+
+		mutex_unlock(&rvu->mbox_lock);
	} while (pfmap);
 }
 
@@ -295,7 +315,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
-	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", WQ_PERCPU, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
@@ -309,7 +329,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
-		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
+		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
@@ -331,6 +351,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu)
 
 int rvu_cgx_init(struct rvu *rvu)
 {
+	struct mac_ops *mac_ops;
	int cgx, err;
	void *cgxd;
 
@@ -340,7 +361,7 @@ int rvu_cgx_init(struct rvu *rvu)
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
-		return -ENODEV;
+		return 0;
	}
 
	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@@ -357,6 +378,15 @@ int rvu_cgx_init(struct rvu *rvu)
	if (err)
		return err;
 
+	/* Clear X2P reset on all MAC blocks */
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		mac_ops = get_mac_ops(cgxd);
+		mac_ops->mac_x2p_reset(cgxd, false);
+	}
+
	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
@@ -364,10 +394,26 @@ int rvu_cgx_init(struct rvu *rvu)
 
	mutex_init(&rvu->cgx_cfg_lock);
 
-	/* Ensure event handler registration is completed, before
-	 * we turn on the links
-	 */
-	mb();
+	return 0;
+}
+
+void cgx_start_linkup(struct rvu *rvu)
+{
+	unsigned long lmac_bmap;
+	struct mac_ops *mac_ops;
+	int cgx, lmac, err;
+	void *cgxd;
+
+	/* Enable receive on all LMACS */
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		mac_ops = get_mac_ops(cgxd);
+		lmac_bmap = cgx_get_lmac_bmap(cgxd);
+		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+			mac_ops->mac_enadis_rx(cgxd, lmac, true);
+	}
 
	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
@@ -380,8 +426,6 @@ int rvu_cgx_init(struct rvu *rvu)
				"Link up process failed to start on cgx %d\n",
				cgx);
	}
-
-	return 0;
 }
 
 int rvu_cgx_exit(struct rvu *rvu)
@@ -395,7 +439,7 @@ int rvu_cgx_exit(struct rvu *rvu)
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
-		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
+		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}
 
@@ -413,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu)
 inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
 {
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
-	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+	    !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
		return false;
	return true;
 }
@@ -440,23 +484,51 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
 
 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
 {
-	int pf = rvu_get_pf(pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
+	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
+	void *cgxd;
 
	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+	cgxd = rvu_cgx_pdata(cgx_id, rvu);
+	mac_ops = get_mac_ops(cgxd);
 
-	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
 
-	return 0;
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
+	struct mac_ops *mac_ops;
+	u8 cgx_id, lmac_id;
+	void *cgxd;
+
+	if (!is_cgx_config_permitted(rvu, pcifunc))
+		return LMAC_AF_ERR_PERM_DENIED;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+	cgxd = rvu_cgx_pdata(cgx_id, rvu);
+	mac_ops = get_mac_ops(cgxd);
+
+	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+	struct mac_ops *mac_ops;
+
+	mac_ops = get_mac_ops(cgxd);
+	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
 }
 
 void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
 {
-	int pf = rvu_get_pf(pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	int i = 0, lmac_count = 0;
+	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;
@@ -464,10 +536,20 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;
 
+	if (rvu_npc_exact_has_match_table(rvu)) {
+		rvu_npc_exact_reset(rvu, pcifunc);
+		return;
+	}
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);
-	max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+	mac_ops = get_mac_ops(cgx_dev);
+	if (!mac_ops)
+		return;
+
+	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
 
	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);
@@ -495,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
 static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
@@ -548,11 +630,41 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
 }
 
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp)
+{
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+	struct rvu_pfvf *parent_pf;
+	struct mac_ops *mac_ops;
+	u8 cgx_idx, lmac;
+	void *cgxd;
+
+	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+		return LMAC_AF_ERR_PERM_DENIED;
+
+	parent_pf = &rvu->pf[pf];
+	/* To ensure reset cgx stats won't affect VF stats,
+	 * check if it used by only PF interface.
+	 * If not, return
+	 */
+	if (parent_pf->cgx_users > 1) {
+		dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+		return 0;
+	}
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+	mac_ops = get_mac_ops(cgxd);
+
+	return mac_ops->mac_stats_reset(cgxd, lmac);
+}
+
 int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;
@@ -561,21 +673,28 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
-	return cgx_get_fec_stats(cgxd, lmac, rsp);
+	mac_ops = get_mac_ops(cgxd);
+	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
 }
 
 int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
 
-	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
-		return -EPERM;
+	if (!is_pf_cgxmapped(rvu, pf))
+		return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
+	pfvf = &rvu->pf[pf];
+	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
 
	return 0;
@@ -585,13 +704,16 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;
 
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
 
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
@@ -606,12 +728,15 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
 
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
 }
@@ -621,7 +746,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	/* If msg is received from PFs(which are not mapped to CGX LMACs)
@@ -633,6 +758,11 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
		return 0;
	}
 
+	if (rvu_npc_exact_has_match_table(rvu)) {
+		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+		return 0;
+	}
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
@@ -642,21 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
-	u8 cgx_id, lmac_id;
-	int rc = 0, i;
-	u64 cfg;
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
 
-	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
-		return -EPERM;
-
-	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
+		return LMAC_AF_ERR_PF_NOT_MAPPED;
 
-	rsp->hdr.rc = rc;
-	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
-	/* copy 48 bit mac address to req->mac_addr */
-	for (i = 0; i < ETH_ALEN; i++)
-		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
 
	return 0;
 }
@@ -664,12 +785,16 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
 {
	u16 pcifunc = req->hdr.pcifunc;
-	int pf = rvu_get_pf(pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
 
+	/* Disable drop on non hit rule */
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
@@ -679,12 +804,16 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
 
+	/* Disable drop on non hit rule */
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
@@ -694,7 +823,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
 static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
 {
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
-	int pf = rvu_get_pf(pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
@@ -702,18 +831,17 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;
 
-	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	/* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
-	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
-	    !is_pf_cgxmapped(rvu, pf))
-		return -ENODEV;
+	if (!is_pf_cgxmapped(rvu, pf))
+		return -EPERM;
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);
 
-	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true);
+	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
@@ -723,13 +851,15 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;
 
+	/* Inform MCS about 8B RX header */
+	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
	return 0;
 }
 
 int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
 {
-	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
		return -EPERM;
 
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
@@ -743,7 +873,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
 
 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
 {
-	int pf = rvu_get_pf(pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_cgx_config_permitted(rvu, pcifunc))
@@ -782,7 +912,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
	u8 cgx_id, lmac_id;
	int pf, err;
 
-	pf = rvu_get_pf(req->hdr.pcifunc);
+	pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;
 
@@ -798,7 +928,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;
@@ -814,18 +944,33 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
 
 u32 rvu_cgx_get_fifolen(struct rvu *rvu)
 {
+	void *cgxd = rvu_first_cgx_pdata(rvu);
+
+	if (!cgxd)
+		return 0;
+
+	return cgx_get_fifo_len(cgxd);
+}
+
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
	struct mac_ops *mac_ops;
-	u32 fifo_len;
+	void *cgxd;
 
-	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
-	fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+	cgxd = rvu_cgx_pdata(cgx, rvu);
+	if (!cgxd)
+		return 0;
 
-	return fifo_len;
+	mac_ops = get_mac_ops(cgxd);
+	if (!mac_ops->lmac_fifo_len)
+		return 0;
+
+	return mac_ops->lmac_fifo_len(cgxd, lmac);
 }
 
 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
 {
-	int pf = rvu_get_pf(pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
@@ -853,11 +998,10 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
	return 0;
 }
 
-int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
-				       struct cgx_pause_frm_cfg *req,
-				       struct cgx_pause_frm_cfg *rsp)
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
+	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
@@ -869,6 +1013,44 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
+		return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+	cgxd = rvu_cgx_pdata(cgx_id, rvu);
+	mac_ops = get_mac_ops(cgxd);
+
+	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+	if (tx_pfc || rx_pfc) {
+		dev_warn(rvu->dev,
+			 "Can not configure 802.3X flow control as PFC frames are enabled");
+		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+	}
+
+	mutex_lock(&rvu->rsrc_lock);
+	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+			       pcifunc & RVU_PFVF_FUNC_MASK)) {
+		mutex_unlock(&rvu->rsrc_lock);
+		return LMAC_AF_ERR_PERM_DENIED;
+	}
+	mutex_unlock(&rvu->rsrc_lock);
+
+	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
+int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
+				       struct cgx_pause_frm_cfg *req,
+				       struct cgx_pause_frm_cfg *rsp)
+{
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+	struct mac_ops *mac_ops;
+	u8 cgx_id, lmac_id;
+	int err = 0;
+	void *cgxd;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -876,19 +1058,17 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
	mac_ops = get_mac_ops(cgxd);
 
	if (req->set)
-		mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
-					      req->tx_pause, req->rx_pause);
+		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
-		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
-						  &rsp->tx_pause,
-						  &rsp->rx_pause);
-	return 0;
+		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+	return err;
 }
 
 int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_pf_cgxmapped(rvu, pf))
@@ -921,7 +1101,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
	/* Assumes LF of a PF and all of its VF belongs to the same
	 * NIX block
	 */
-	pcifunc = pf << RVU_PFVF_PF_SHIFT;
+	pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
@@ -948,10 +1128,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;
 
-	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
		return 0;
 
-	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+	parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);
 
	mutex_lock(&rvu->cgx_cfg_lock);
@@ -994,7 +1174,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_pf_cgxmapped(rvu, pf))
@@ -1010,19 +1190,26 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!rvu->fwdata)
-		return -ENXIO;
+		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
 
	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
-	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
-	       sizeof(struct cgx_lmac_fwdata_s));
+	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+		memcpy(&rsp->fwdata,
+		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
+		       sizeof(struct cgx_lmac_fwdata_s));
+	else
+		memcpy(&rsp->fwdata,
+		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+		       sizeof(struct cgx_lmac_fwdata_s));
+
	return 0;
 }
 
@@ -1030,7 +1217,8 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+	struct cgx_lmac_fwdata_s *linkmodes;
	u8 cgx_idx, lmac;
	void *cgxd;
@@ -1039,33 +1227,128 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
-	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+		linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac];
+	else
+		linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac];
+
+	rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes,
+					cgx_idx, lmac);
	return 0;
 }
 
-int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
	return cgx_lmac_addr_reset(cgx_id, lmac_id);
 }
 
 int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
-					 struct msg_rsp *rsp)
+					 struct cgx_mac_addr_update_rsp *rsp)
 {
-	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
 
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
 
+	if (rvu_npc_exact_has_match_table(rvu))
+		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
 }
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+			       u8 rx_pause, u16 pfc_en)
+{
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
+	u8 rx_8023 = 0, tx_8023 = 0;
+	struct mac_ops *mac_ops;
+	u8 cgx_id, lmac_id;
+	void *cgxd;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if (!is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+	cgxd = rvu_cgx_pdata(cgx_id, rvu);
+	mac_ops = get_mac_ops(cgxd);
+
+	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+	if (tx_8023 || rx_8023) {
+		dev_warn(rvu->dev,
+			 "Can not configure PFC as 802.3X pause frames are enabled");
+		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+	}
+
+	mutex_lock(&rvu->rsrc_lock);
+	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+			       pcifunc & RVU_PFVF_FUNC_MASK)) {
+		mutex_unlock(&rvu->rsrc_lock);
+		return LMAC_AF_ERR_PERM_DENIED;
+	}
+	mutex_unlock(&rvu->rsrc_lock);
+
+	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+					    struct cgx_pfc_cfg *req,
+					    struct cgx_pfc_rsp *rsp)
+{
+	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+	struct mac_ops *mac_ops;
+	u8 cgx_id, lmac_id;
+	void *cgxd;
+	int err;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if (!is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+	cgxd = rvu_cgx_pdata(cgx_id, rvu);
+	mac_ops = get_mac_ops(cgxd);
+
+	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+					 req->rx_pause, req->pfc_en);
+
+	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+	return err;
+}
+
+void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
+{
+	int pf = rvu_get_pf(rvu->pdev, pcifunc);
+	struct mac_ops *mac_ops;
+	struct cgx *cgxd;
+	u8 cgx, lmac;
+
+	if (!is_pf_cgxmapped(rvu, pf))
+		return;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+	cgxd = rvu_cgx_pdata(cgx, rvu);
+	mac_ops = get_mac_ops(cgxd);
+
+	if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
+		dev_err(rvu->dev, "Failed to reset MAC\n");
+}
