author     David S. Miller <davem@davemloft.net>    2015-05-23 01:22:35 -0400
committer  David S. Miller <davem@davemloft.net>    2015-05-23 01:22:35 -0400
commit     36583eb54d46c36a447afd6c379839f292397429 (patch)
tree       70f5399529dc2135a986947b37c655194da60e9d /drivers/infiniband/hw
parent     fa7912be967102cdbecd8ef172571b28eb22099e (diff)
parent     cf539cbd8a81e12880735a0912de8b99f46c84fd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/cadence/macb.c
drivers/net/phy/phy.c
include/linux/skbuff.h
net/ipv4/tcp.c
net/switchdev/switchdev.c
Switchdev was a case of the RTNH_F_{EXTERNAL --> OFFLOAD}
renaming overlapping with net-next changes of various
sorts.
phy.c was a case of two overlapping changes: one added a
local variable to a function while the other removed one.
tcp.c overlapped a deadlock fix with the addition of new tcp_info
statistic values.
macb.c involved the addition of two zynq device entries.
skbuff.h involved adding back ipv4_daddr to nf_bridge_info
whilst net-next changes put two other existing members of
that struct into a union.
Signed-off-by: David S. Miller <davem@davemloft.net>
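
The skbuff.h resolution above is purely structural, so a small illustration may help. The sketch below is hypothetical: only nf_bridge_info and ipv4_daddr are taken from the description, and member_a/member_b are placeholder names standing in for the two existing members that net-next folded into a union.

/* Hypothetical sketch of the merged shape described above; not the
 * real <linux/skbuff.h> layout.
 */
#include <stdint.h>

struct nf_bridge_info_sketch {
        unsigned int mask;
        union {                         /* net-next side: two existing members share storage */
                void *member_a;         /* placeholder name */
                void *member_b;         /* placeholder name */
        };
        uint32_t ipv4_daddr;            /* net side: the member added back */
};

Resolving such a conflict means keeping both hunks: the union introduced by net-next plus the field re-added by net.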
Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/ocrdma/ocrdma.h       |  4
 drivers/infiniband/hw/ocrdma/ocrdma_ah.c    | 12
 drivers/infiniband/hw/ocrdma/ocrdma_hw.c    | 83
 drivers/infiniband/hw/ocrdma/ocrdma_sli.h   |  9
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 12
5 files changed, 79 insertions, 41 deletions
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index c9780d919769..b396344fae16 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"

-#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"

 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -515,6 +515,8 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
         memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
         if (rdma_is_multicast_addr(&in6))
                 rdma_get_mcast_mac(&in6, mac_addr);
+        else if (rdma_link_local_addr(&in6))
+                rdma_get_ll_mac(&in6, mac_addr);
         else
                 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
         return 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d812904f3984..f5a5ea836dbd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -56,7 +56,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
         vlan_tag = attr->vlan_id;
         if (!vlan_tag || (vlan_tag > 0xFFF))
                 vlan_tag = dev->pvid;
-        if (vlan_tag && (vlan_tag < 0x1000)) {
+        if (vlan_tag || dev->pfc_state) {
+                if (!vlan_tag) {
+                        pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+                                dev->id);
+                        pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+                                dev->id);
+                }
                 eth.eth_type = cpu_to_be16(0x8100);
                 eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
@@ -121,7 +127,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                 goto av_conf_err;
         }

-        if (pd->uctx) {
+        if ((pd->uctx) &&
+            (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
+            (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
                 status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
                                         attr->dmac, &attr->vlan_id);
                 if (status) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0c9e95909a64..47615ff33bc6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -933,12 +933,18 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
         struct ocrdma_eqe eqe;
         struct ocrdma_eqe *ptr;
         u16 cq_id;
+        u8 mcode;
         int budget = eq->cq_cnt;

         do {
                 ptr = ocrdma_get_eqe(eq);
                 eqe = *ptr;
                 ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
+                mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
+                                >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
+                if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
+                        pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
+                               eq->q.id, eqe.id_valid);
                 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
                         break;

@@ -1434,27 +1440,30 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
         struct ocrdma_alloc_pd_range_rsp *rsp;

         /* Pre allocate the DPP PDs */
-        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
-        if (!cmd)
-                return -ENOMEM;
-        cmd->pd_count = dev->attr.max_dpp_pds;
-        cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
-        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
-        if (status)
-                goto mbx_err;
-        rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
-
-        if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
-                dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
-                                OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
-                dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
-                                OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
-                dev->pd_mgr->max_dpp_pd = rsp->pd_count;
-                pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
-                dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
-                                                     GFP_KERNEL);
+        if (dev->attr.max_dpp_pds) {
+                cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
+                                          sizeof(*cmd));
+                if (!cmd)
+                        return -ENOMEM;
+                cmd->pd_count = dev->attr.max_dpp_pds;
+                cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+                status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+                rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+                if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
+                    rsp->pd_count) {
+                        dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+                                        OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+                        dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+                                        OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+                        dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+                        pd_bitmap_size =
+                                BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+                        dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+                                                             GFP_KERNEL);
+                }
+                kfree(cmd);
         }
-        kfree(cmd);

         cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
         if (!cmd)
@@ -1462,10 +1471,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)

         cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
         status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
-        if (status)
-                goto mbx_err;
         rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
-        if (rsp->pd_count) {
+        if (!status && rsp->pd_count) {
                 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
                                 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
                 dev->pd_mgr->max_normal_pd = rsp->pd_count;
@@ -1473,15 +1480,13 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
                 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
                                                       GFP_KERNEL);
         }
+        kfree(cmd);

         if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
                 /* Enable PD resource manager */
                 dev->pd_mgr->pd_prealloc_valid = true;
-        } else {
-                return -ENOMEM;
+                return 0;
         }

-mbx_err:
-        kfree(cmd);
         return status;
 }
@@ -2406,7 +2411,7 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
         struct ocrdma_query_qp *cmd;
         struct ocrdma_query_qp_rsp *rsp;

-        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
+        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
         if (!cmd)
                 return status;
         cmd->qp_id = qp->id;
@@ -2428,7 +2433,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
         int status;
         struct ib_ah_attr *ah_attr = &attrs->ah_attr;
         union ib_gid sgid, zgid;
-        u32 vlan_id;
+        u32 vlan_id = 0xFFFF;
         u8 mac_addr[6];
         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

@@ -2468,12 +2473,22 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
         cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
         if (attr_mask & IB_QP_VID) {
                 vlan_id = attrs->vlan_id;
+        } else if (dev->pfc_state) {
+                vlan_id = 0;
+                pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+                       dev->id);
+                pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+                       dev->id);
+        }
+
+        if (vlan_id < 0x1000) {
                 cmd->params.vlan_dmac_b4_to_b5 |=
                     vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                 cmd->params.rnt_rc_sl_fl |=
                     (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
         }
+
         return 0;
 }

@@ -2519,8 +2534,10 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
         }
         if (attr_mask & IB_QP_PATH_MTU) {
-                if (attrs->path_mtu < IB_MTU_256 ||
+                if (attrs->path_mtu < IB_MTU_512 ||
                     attrs->path_mtu > IB_MTU_4096) {
+                        pr_err("ocrdma%d: IB MTU %d is not supported\n",
+                               dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
                         status = -EINVAL;
                         goto pmtu_err;
                 }
@@ -3147,9 +3164,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
         ocrdma_free_pd_pool(dev);
         ocrdma_mbx_delete_ah_tbl(dev);

-        /* cleanup the eqs */
-        ocrdma_destroy_eqs(dev);
-
         /* cleanup the control path */
         ocrdma_destroy_mq(dev);
+
+        /* cleanup the eqs */
+        ocrdma_destroy_eqs(dev);
 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 243c87c8bd65..02ad0aee99af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1176,6 +1176,8 @@ struct ocrdma_query_qp_rsp {
         struct ocrdma_mqe_hdr hdr;
         struct ocrdma_mbx_rsp rsp;
         struct ocrdma_qp_params params;
+        u32 dpp_credits_cqid;
+        u32 rbq_id;
 };

 enum {
@@ -1624,12 +1626,19 @@ struct ocrdma_delete_ah_tbl_rsp {
 enum {
         OCRDMA_EQE_VALID_SHIFT          = 0,
         OCRDMA_EQE_VALID_MASK           = BIT(0),
+        OCRDMA_EQE_MAJOR_CODE_MASK      = 0x0E,
+        OCRDMA_EQE_MAJOR_CODE_SHIFT     = 0x01,
         OCRDMA_EQE_FOR_CQE_MASK         = 0xFFFE,
         OCRDMA_EQE_RESOURCE_ID_SHIFT    = 16,
         OCRDMA_EQE_RESOURCE_ID_MASK     = 0xFFFF <<
                                 OCRDMA_EQE_RESOURCE_ID_SHIFT,
 };

+enum major_code {
+        OCRDMA_MAJOR_CODE_COMPLETION    = 0x00,
+        OCRDMA_MAJOR_CODE_SENTINAL      = 0x01
+};
+
 struct ocrdma_eqe {
         u32 id_valid;
 };
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 877175563634..9dcb66077d6c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -365,7 +365,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
         if (!pd)
                 return ERR_PTR(-ENOMEM);

-        if (udata && uctx) {
+        if (udata && uctx && dev->attr.max_dpp_pds) {
                 pd->dpp_enabled =
                         ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                 pd->num_dpp_qp =
@@ -1721,18 +1721,20 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
         struct ocrdma_qp *qp;
         struct ocrdma_dev *dev;
         struct ib_qp_attr attrs;
-        int attr_mask = IB_QP_STATE;
+        int attr_mask;
         unsigned long flags;

         qp = get_ocrdma_qp(ibqp);
         dev = get_ocrdma_dev(ibqp->device);

-        attrs.qp_state = IB_QPS_ERR;
         pd = qp->pd;

         /* change the QP state to ERROR */
-        _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
-
+        if (qp->state != OCRDMA_QPS_RST) {
+                attrs.qp_state = IB_QPS_ERR;
+                attr_mask = IB_QP_STATE;
+                _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
+        }
         /* ensure that CQEs for newly created QP (whose id may be same with
          * one which just getting destroyed are same), dont get
          * discarded until the old CQEs are discarded.
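
For readers following the ocrdma.h hunk above, the new else-if branch derives the destination MAC directly from a link-local GID instead of falling back to the address handle's dmac. Below is a standalone userspace sketch of that classification; the helper bodies follow the conventional IPv6-to-MAC mappings (RFC 2464 for multicast, modified EUI-64 for link-local) and are illustrative assumptions, not the kernel's rdma_* helpers.

/*
 * Userspace sketch of the GID classification used by ocrdma_resolve_dmac()
 * after this patch.  gid_is_multicast()/mcast_gid_to_mac() and
 * gid_is_link_local()/ll_gid_to_mac() only approximate the kernel helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_ALEN 6

static bool gid_is_multicast(const struct in6_addr *a)
{
        return a->s6_addr[0] == 0xff;                   /* ff00::/8 */
}

static bool gid_is_link_local(const struct in6_addr *a)
{
        return a->s6_addr[0] == 0xfe &&
               (a->s6_addr[1] & 0xc0) == 0x80;          /* fe80::/10 */
}

static void mcast_gid_to_mac(const struct in6_addr *a, uint8_t *mac)
{
        mac[0] = 0x33;                                  /* RFC 2464: 33:33 + low 32 bits */
        mac[1] = 0x33;
        memcpy(&mac[2], &a->s6_addr[12], 4);
}

static void ll_gid_to_mac(const struct in6_addr *a, uint8_t *mac)
{
        memcpy(mac, &a->s6_addr[8], 3);                 /* drop the ff:fe filler bytes */
        memcpy(mac + 3, &a->s6_addr[13], 3);
        mac[0] ^= 0x02;                                 /* undo the EUI-64 U/L bit flip */
}

/* Mirrors the decision order of the patched ocrdma_resolve_dmac(). */
static void resolve_dmac(const struct in6_addr *dgid,
                         const uint8_t *ah_dmac, uint8_t *mac)
{
        if (gid_is_multicast(dgid))
                mcast_gid_to_mac(dgid, mac);
        else if (gid_is_link_local(dgid))
                ll_gid_to_mac(dgid, mac);
        else
                memcpy(mac, ah_dmac, ETH_ALEN);         /* pre-resolved unicast dmac */
}

int main(void)
{
        struct in6_addr dgid;
        uint8_t dmac[ETH_ALEN] = { 0 };
        const uint8_t ah_dmac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        /* Link-local GID embedding MAC 00:11:22:33:44:55 as modified EUI-64. */
        inet_pton(AF_INET6, "fe80::211:22ff:fe33:4455", &dgid);
        resolve_dmac(&dgid, ah_dmac, dmac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               dmac[0], dmac[1], dmac[2], dmac[3], dmac[4], dmac[5]);
        return 0;
}

Compiled with a plain cc invocation this prints 00:11:22:33:44:55, i.e. the MAC recovered from the interface identifier, which is the shortcut the driver now takes for link-local destinations instead of consulting the address handle.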