Diffstat (limited to 'drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c')
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c  | 312
1 file changed, 302 insertions(+), 10 deletions(-)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index dee0aa60b698..b4b2d3d1cbc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -5,6 +5,20 @@
 #include "otx2_cptpf.h"
 #include "rvu_reg.h"
 
+/* Fastpath ipsec opcode with inplace processing */
+#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
+#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
+
+#define cpt_inline_rx_opcode(pdev)                       \
+({                                                       \
+	u8 opcode;                                       \
+	if (is_dev_otx2(pdev))                           \
+		opcode = CPT_INLINE_RX_OPCODE;           \
+	else                                             \
+		opcode = CN10K_CPT_INLINE_RX_OPCODE;     \
+	(opcode);                                        \
+})
+
 /*
  * CPT PF driver version, It will be incremented by 1 for every feature
  * addition in CPT mailbox messages.
@@ -64,7 +78,7 @@ static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
 	rsp->hdr.pcifunc = req->pcifunc;
 	rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
-	rsp->cpt_revision = cptpf->pdev->revision;
+	rsp->cpt_revision = cptpf->eng_grps.rid;
 	memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
 
 	return 0;
@@ -112,6 +126,182 @@ static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
 	return 0;
 }
 
+static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
+					 int sso_pf_func, u8 slot)
+{
+	struct cpt_inline_ipsec_cfg_msg *req;
+	struct pci_dev *pdev = cptpf->pdev;
+
+	req = (struct cpt_inline_ipsec_cfg_msg *)
+	      otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+				      sizeof(*req), sizeof(struct msg_rsp));
+	if (req == NULL) {
+		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+		return -EFAULT;
+	}
+	memset(req, 0, sizeof(*req));
+	req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+	req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
+	req->dir = CPT_INLINE_INBOUND;
+	req->slot = slot;
+	req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
+	req->sso_pf_func = sso_pf_func;
+	req->enable = 1;
+
+	return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+}
+
+static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
+				  struct otx2_cpt_rx_inline_lf_cfg *req)
+{
+	struct nix_inline_ipsec_cfg *nix_req;
+	struct pci_dev *pdev = cptpf->pdev;
+	int ret;
+
+	nix_req = (struct nix_inline_ipsec_cfg *)
+		  otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+					  sizeof(*nix_req),
+					  sizeof(struct msg_rsp));
+	if (nix_req == NULL) {
+		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+		return -EFAULT;
+	}
+	memset(nix_req, 0, sizeof(*nix_req));
+	nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
+	nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+	nix_req->enable = 1;
+	nix_req->credit_th = req->credit_th;
+	nix_req->bpid = req->bpid;
+	if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
+		nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
+	else
+		nix_req->cpt_credit = req->credit - 1;
+	nix_req->gen_cfg.egrp = egrp;
+	if (req->opcode)
+		nix_req->gen_cfg.opcode = req->opcode;
+	else
+		nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
+	nix_req->gen_cfg.param1 = req->param1;
+	nix_req->gen_cfg.param2 = req->param2;
+	nix_req->inst_qsel.cpt_pf_func =
+		OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
+	nix_req->inst_qsel.cpt_slot = 0;
+	ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+	if (ret)
+		return ret;
+
+	if (cptpf->has_cpt1) {
+		ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
+		if (ret)
+			return ret;
+	}
+
+	return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
+}
+
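For reference, the two opcode values above resolve to 0x66 (0x26 | 0x40) for OcteonTX2 and 0x69 (0x29 | 0x40) for CN10K, with bit 6 selecting inplace processing, and the NIX credit programmed by rx_inline_ipsec_lf_cfg() is always one less than the usable queue depth. A minimal userspace sketch of both calculations; the queue depth used here is an illustrative stand-in for the driver's OTX2_CPT_INST_QLEN_MSGS:

#include <stdio.h>

/* Same encodings as the defines added above; bit 6 marks inplace
 * processing, giving effective opcodes 0x66 and 0x69. */
#define CPT_INLINE_RX_OPCODE       (0x26 | (1 << 6))
#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))

/* Illustrative queue depth; the driver uses OTX2_CPT_INST_QLEN_MSGS. */
#define QLEN_MSGS 8192

/* Mirrors the clamp in rx_inline_ipsec_lf_cfg(): a zero or oversized
 * request falls back to the full depth, and the programmed value is
 * always "credits - 1". */
static int cpt_credit(int requested)
{
	if (!requested || requested > QLEN_MSGS)
		return QLEN_MSGS - 1;
	return requested - 1;
}

int main(void)
{
	printf("otx2 opcode 0x%x, cn10k opcode 0x%x\n",
	       CPT_INLINE_RX_OPCODE, CN10K_CPT_INLINE_RX_OPCODE);
	printf("credit(0)=%d credit(100)=%d credit(9000)=%d\n",
	       cpt_credit(0), cpt_credit(100), cpt_credit(9000));
	return 0;
}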
+int
+otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
+			struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs)
+{
+	int ret;
+
+	ret = otx2_cptlf_init(lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 1);
+	if (ret) {
+		dev_err(&cptpf->pdev->dev,
+			"LF configuration failed for RX inline ipsec.\n");
+		return ret;
+	}
+
+	/* Get msix offsets for attached LFs */
+	ret = otx2_cpt_msix_offset_msg(lfs);
+	if (ret)
+		goto cleanup_lf;
+
+	/* Register for CPT LF Misc interrupts */
+	ret = otx2_cptlf_register_misc_interrupts(lfs);
+	if (ret)
+		goto free_irq;
+
+	return 0;
+free_irq:
+	otx2_cptlf_unregister_misc_interrupts(lfs);
+cleanup_lf:
+	otx2_cptlf_shutdown(lfs);
+	return ret;
+}
+
+void
+otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs)
+{
+	/* Unregister misc interrupt */
+	otx2_cptlf_unregister_misc_interrupts(lfs);
+
+	/* Cleanup LFs */
+	otx2_cptlf_shutdown(lfs);
+}
+
+static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
+					     struct mbox_msghdr *req)
+{
+	struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+	int num_lfs = 1, ret;
+	u8 egrp;
+
+	cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
+	if (cptpf->lfs.lfs_num) {
+		dev_err(&cptpf->pdev->dev,
+			"LF is already configured for RX inline ipsec.\n");
+		return -EEXIST;
+	}
+	/*
+	 * Allow LFs to execute requests destined to only grp IE_TYPES and
+	 * set queue priority of each LF to high
+	 */
+	egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
+	if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+		dev_err(&cptpf->pdev->dev,
+			"Engine group for inline ipsec is not available\n");
+		return -ENOENT;
+	}
+
+	cptpf->lfs.global_slot = 0;
+	cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
+	cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
+
+	ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs);
+	if (ret) {
+		dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n");
+		return ret;
+	}
+
+	if (cptpf->has_cpt1) {
+		cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
+		cptpf->cpt1_lfs.global_slot = num_lfs;
+		cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
+		cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
+		ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp,
+					      num_lfs);
+		if (ret) {
+			dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n");
+			goto lf_cleanup;
+		}
+		cptpf->rsrc_req_blkaddr = 0;
+	}
+
+	ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
+	if (ret)
+		goto lf1_cleanup;
+
+	return 0;
+
+lf1_cleanup:
+	otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
+lf_cleanup:
+	otx2_inline_cptlf_cleanup(&cptpf->lfs);
+	return ret;
+}
+
 static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
 			       struct otx2_cptvf_info *vf,
 			       struct mbox_msghdr *req, int size)
@@ -132,6 +322,10 @@ static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
 	case MBOX_MSG_GET_KVF_LIMITS:
 		err = handle_msg_kvf_limits(cptpf, vf, req);
 		break;
+	case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
+		err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
+		break;
+
 	default:
 		err = forward_to_af(cptpf, vf, req, size);
 		break;
@@ -199,9 +393,8 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
 
 		/* Set which VF sent this message based on mbox IRQ */
-		msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
-				((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
-
+		msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
+						(vf->vf_id + 1));
 		err = cptpf_handle_vf_req(cptpf, vf, msg,
 					  msg->next_msgoff - offset);
 		/*
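The vfpf-handler hunk above swaps the open-coded pcifunc packing for rvu_make_pcifunc(), which derives the field layout from the PCI device instead of hard-coding shift and mask values. A sketch of what the removed expression computed; the shift and mask shown are the legacy pre-CN20K layout and are assumptions for illustration, not values taken from this patch:

#include <stdio.h>

/* Assumed legacy OcteonTX2 layout: function in bits [9:0], PF above it.
 * Newer parts use a different layout, which is why the driver now asks
 * rvu_make_pcifunc() to build the value from the PCI device. */
#define RVU_PFVF_PF_SHIFT  10
#define RVU_PFVF_FUNC_MASK 0x3FF

/* What the removed open-coded expression computed for a VF message;
 * func is vf_id + 1 because func 0 addresses the PF itself. */
static unsigned int make_pcifunc_legacy(unsigned int pf_id, unsigned int func)
{
	return (pf_id << RVU_PFVF_PF_SHIFT) | (func & RVU_PFVF_FUNC_MASK);
}

int main(void)
{
	/* A message from VF index 4 on PF 2 -> func = 5 */
	printf("pcifunc = 0x%04x\n", make_pcifunc_legacy(2, 4 + 1));
	return 0;
}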
@@ -224,14 +417,28 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
 
 irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
 {
 	struct otx2_cptpf_dev *cptpf = arg;
+	struct otx2_mbox_dev *mdev;
+	struct otx2_mbox *mbox;
+	struct mbox_hdr *hdr;
 	u64 intr;
 
 	/* Read the interrupt bits */
 	intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
 
 	if (intr & 0x1ULL) {
-		/* Schedule work queue function to process the MBOX request */
-		queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+		mbox = &cptpf->afpf_mbox;
+		mdev = &mbox->dev[0];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			/* Schedule work queue function to process the MBOX request */
+			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+
+		mbox = &cptpf->afpf_mbox_up;
+		mdev = &mbox->dev[0];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			/* Schedule work queue function to process the MBOX request */
+			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
 		/* Clear and ack the interrupt */
 		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
 				 0x1ULL);
@@ -242,8 +449,11 @@ irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
 static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
 				  struct mbox_msghdr *msg)
 {
+	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
 	struct device *dev = &cptpf->pdev->dev;
 	struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+	struct msix_offset_rsp *rsp_msix;
+	int i;
 
 	if (msg->id >= MBOX_MSG_MAX) {
 		dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
@@ -254,11 +464,20 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
 			msg->sig, msg->id);
 		return;
 	}
+	if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
+		lfs = &cptpf->cpt1_lfs;
 
 	switch (msg->id) {
 	case MBOX_MSG_READY:
-		cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
-				RVU_PFVF_PF_MASK;
+		cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
+		break;
+	case MBOX_MSG_MSIX_OFFSET:
+		rsp_msix = (struct msix_offset_rsp *) msg;
+		for (i = 0; i < rsp_msix->cptlfs; i++)
+			lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+
+		for (i = 0; i < rsp_msix->cpt1_lfs; i++)
+			lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i];
 		break;
 	case MBOX_MSG_CPT_RD_WR_REGISTER:
 		rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
@@ -273,11 +492,16 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
 		break;
 	case MBOX_MSG_ATTACH_RESOURCES:
 		if (!msg->rc)
-			cptpf->lfs.are_lfs_attached = 1;
+			lfs->are_lfs_attached = 1;
 		break;
 	case MBOX_MSG_DETACH_RESOURCES:
 		if (!msg->rc)
-			cptpf->lfs.are_lfs_attached = 0;
+			lfs->are_lfs_attached = 0;
+		break;
+	case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
+	case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
+	case MBOX_MSG_CPT_LF_RESET:
+	case MBOX_MSG_LMTST_TBL_SETUP:
 		break;
 
 	default:
@@ -367,3 +591,71 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
 	}
 	otx2_mbox_reset(afpf_mbox, 0);
 }
+
+static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
+				      struct mbox_msghdr *msg)
+{
+	struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
+	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+	struct msg_rsp *rsp;
+
+	if (cptpf->lfs.lfs_num)
+		lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
+				   &lfs->lf[0]);
+
+	rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
+						    sizeof(*rsp));
+	if (!rsp)
+		return;
+
+	rsp->hdr.id = msg->id;
+	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+	rsp->hdr.pcifunc = 0;
+	rsp->hdr.rc = 0;
+}
+
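The reworked otx2_cptpf_afpf_mbox_intr() earlier in this diff queues work separately for the regular and the AF-initiated "up" mailbox, and only when the corresponding ring actually holds messages. A simplified standalone sketch of that dispatch decision, with stand-in types in place of the driver's shared-memory mailbox structures:

#include <stdio.h>

/* Stand-in for the shared-memory mailbox header; in the driver, hdr
 * lives at mdev->mbase + mbox->rx_start. */
struct mbox_hdr_demo {
	int num_msgs;
};

static void queue_work_demo(const char *name)
{
	printf("queue_work(%s)\n", name);
}

/* Mirrors the new interrupt logic: each channel is checked for pending
 * messages and scheduled independently, so an empty ring no longer
 * triggers a worker run. */
static void afpf_mbox_intr_demo(const struct mbox_hdr_demo *down,
				const struct mbox_hdr_demo *up)
{
	if (down->num_msgs)
		queue_work_demo("afpf_mbox_work");
	if (up->num_msgs)
		queue_work_demo("afpf_mbox_up_work");
}

int main(void)
{
	struct mbox_hdr_demo down = { .num_msgs = 2 };
	struct mbox_hdr_demo up = { .num_msgs = 0 };

	afpf_mbox_intr_demo(&down, &up); /* only the regular queue runs */
	return 0;
}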
+static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
+				     struct mbox_msghdr *msg)
+{
+	if (msg->id >= MBOX_MSG_MAX) {
+		dev_err(&cptpf->pdev->dev,
+			"MBOX msg with unknown ID %d\n", msg->id);
+		return;
+	}
+
+	switch (msg->id) {
+	case MBOX_MSG_CPT_INST_LMTST:
+		handle_msg_cpt_inst_lmtst(cptpf, msg);
+		break;
+	default:
+		otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
+	}
+}
+
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
+{
+	struct otx2_cptpf_dev *cptpf;
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *rsp_hdr;
+	struct mbox_msghdr *msg;
+	struct otx2_mbox *mbox;
+	int offset, i;
+
+	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
+	mbox = &cptpf->afpf_mbox_up;
+	mdev = &mbox->dev[0];
+	/* Sync mbox data into memory */
+	smp_wmb();
+
+	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+	for (i = 0; i < rsp_hdr->num_msgs; i++) {
+		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+		process_afpf_mbox_up_msg(cptpf, msg);
+
+		offset = mbox->rx_start + msg->next_msgoff;
+	}
+	otx2_mbox_msg_send(mbox, 0);
+}
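otx2_cptpf_afpf_mbox_up_handler() above recovers its otx2_cptpf_dev from the embedded work_struct via container_of(). A self-contained illustration of that pointer arithmetic, using a simplified stand-in for the kernel macro and structures:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins; the kernel's container_of() additionally
 * type-checks the member pointer. */
struct work_struct_demo { int pending; };

struct cptpf_dev_demo {
	int pf_id;
	struct work_struct_demo afpf_mbox_up_work;
};

#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct cptpf_dev_demo dev = { .pf_id = 3 };
	struct work_struct_demo *work = &dev.afpf_mbox_up_work;

	/* Recover the enclosing device from the embedded work item,
	 * exactly as the up-handler above does. */
	struct cptpf_dev_demo *cptpf =
		container_of_demo(work, struct cptpf_dev_demo,
				  afpf_mbox_up_work);

	printf("pf_id = %d\n", cptpf->pf_id); /* prints 3 */
	return 0;
}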
