Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
 drivers/infiniband/hw/cxgb4/cm.c | 810 ++++++++++++++++++++++++-------------
 1 file changed, 509 insertions(+), 301 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e49b34c3b136..b3b45c49077d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -77,9 +77,9 @@ static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
-static int dack_mode = 1;
+static int dack_mode;
module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
@@ -99,10 +99,6 @@ module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
"Enable tcp window scaling (default=1)");
-int c4iw_debug;
-module_param(c4iw_debug, int, 0644);
-MODULE_PARM_DESC(c4iw_debug, "obsolete");
-
static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
@@ -144,12 +140,12 @@ static struct workqueue_struct *workq;
static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
static LIST_HEAD(timeout_list);
-static spinlock_t timeout_lock;
+static DEFINE_SPINLOCK(timeout_lock);
static void deref_cm_id(struct c4iw_ep_common *epc)
{
@@ -180,7 +176,7 @@ static void ref_qp(struct c4iw_ep *ep)
static void start_ep_timer(struct c4iw_ep *ep)
{
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
if (timer_pending(&ep->timer)) {
pr_err("%s timer already started! ep %p\n",
__func__, ep);
@@ -189,15 +185,13 @@ static void start_ep_timer(struct c4iw_ep *ep)
clear_bit(TIMEOUT, &ep->com.flags);
c4iw_get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
add_timer(&ep->timer);
}
static int stop_ep_timer(struct c4iw_ep *ep)
{
- pr_debug("%s ep %p stopping\n", __func__, ep);
- del_timer_sync(&ep->timer);
+ pr_debug("ep %p stopping\n", ep);
+ timer_delete_sync(&ep->timer);
if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
c4iw_put_ep(&ep->com);
return 0;
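
The timer hunks above drop the open-coded .data/.function setup in favor of
the timer_list API: the callback receives the struct timer_list pointer and
recovers its ep with from_timer(), and del_timer_sync() becomes
timer_delete_sync(). A minimal sketch of the resulting pattern (the real
ep_timeout() body, which queues the ep on timeout_list, is elided):

    static void ep_timeout(struct timer_list *t)
    {
            struct c4iw_ep *ep = from_timer(ep, t, timer);

            /* queue ep on timeout_list and kick the workqueue ... */
    }

    /* at ep creation; replaces init_timer() plus manual .data/.function: */
    timer_setup(&ep->timer, ep_timeout, 0);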
@@ -212,7 +206,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- pr_debug("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
@@ -229,7 +223,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- pr_debug("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -265,8 +259,8 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss & 7)
pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
TCPOPT_MSS_G(opt), ep->mss, ep->emss);
- pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
- ep->mss, ep->emss);
+ pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
+ ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -287,7 +281,7 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
mutex_lock(&epc->mutex);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ pr_debug("%s -> %s\n", states[epc->state], states[new]);
__state_set(epc, new);
mutex_unlock(&epc->mutex);
return;
@@ -318,11 +312,18 @@ static void *alloc_ep(int size, gfp_t gfp)
epc = kzalloc(size, gfp);
if (epc) {
+ epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
+ if (!epc->wr_waitp) {
+ kfree(epc);
+ epc = NULL;
+ goto out;
+ }
kref_init(&epc->kref);
mutex_init(&epc->mutex);
- c4iw_init_wr_wait(&epc->wr_wait);
+ c4iw_init_wr_wait(epc->wr_waitp);
}
- pr_debug("%s alloc ep %p\n", __func__, epc);
+ pr_debug("alloc ep %p\n", epc);
+out:
return epc;
}
@@ -330,20 +331,23 @@ static void remove_ep_tid(struct c4iw_ep *ep)
{
unsigned long flags;
- spin_lock_irqsave(&ep->com.dev->lock, flags);
- _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
- if (idr_is_empty(&ep->com.dev->hwtid_idr))
+ xa_lock_irqsave(&ep->com.dev->hwtids, flags);
+ __xa_erase(&ep->com.dev->hwtids, ep->hwtid);
+ if (xa_empty(&ep->com.dev->hwtids))
wake_up(&ep->com.dev->wait);
- spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+ xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
}
-static void insert_ep_tid(struct c4iw_ep *ep)
+static int insert_ep_tid(struct c4iw_ep *ep)
{
unsigned long flags;
+ int err;
+
+ xa_lock_irqsave(&ep->com.dev->hwtids, flags);
+ err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
+ xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
- spin_lock_irqsave(&ep->com.dev->lock, flags);
- _insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
- spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+ return err;
}
/*
@@ -354,11 +358,11 @@ static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
struct c4iw_ep *ep;
unsigned long flags;
- spin_lock_irqsave(&dev->lock, flags);
- ep = idr_find(&dev->hwtid_idr, tid);
+ xa_lock_irqsave(&dev->hwtids, flags);
+ ep = xa_load(&dev->hwtids, tid);
if (ep)
c4iw_get_ep(&ep->com);
- spin_unlock_irqrestore(&dev->lock, flags);
+ xa_unlock_irqrestore(&dev->hwtids, flags);
return ep;
}
@@ -371,11 +375,11 @@ static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
struct c4iw_listen_ep *ep;
unsigned long flags;
- spin_lock_irqsave(&dev->lock, flags);
- ep = idr_find(&dev->stid_idr, stid);
+ xa_lock_irqsave(&dev->stids, flags);
+ ep = xa_load(&dev->stids, stid);
if (ep)
c4iw_get_ep(&ep->com);
- spin_unlock_irqrestore(&dev->lock, flags);
+ xa_unlock_irqrestore(&dev->stids, flags);
return ep;
}
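
The idr lookups guarded by the per-device spinlock become XArray loads under
the array's own lock, with the ep reference taken before the lock is dropped.
The insert side, unlike the old _insert_handle(), can now fail, so callers
must unwind; a minimal sketch of that pattern as used by this patch (the
unwind label is hypothetical):

    err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
    if (err)                        /* -EBUSY if the index is occupied */
            goto fail_free_atid;    /* hypothetical unwind label */
    ...
    xa_erase_irq(&dev->atids, ep->atid);    /* teardown path */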
@@ -384,7 +388,7 @@ void _c4iw_free_ep(struct kref *kref)
struct c4iw_ep *ep;
ep = container_of(kref, struct c4iw_ep, com.kref);
- pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
+ pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -402,11 +406,11 @@ void _c4iw_free_ep(struct kref *kref)
ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
- if (ep->mpa_skb)
- kfree_skb(ep->mpa_skb);
+ kfree_skb(ep->mpa_skb);
}
if (!skb_queue_empty(&ep->com.ep_skb_list))
skb_queue_purge(&ep->com.ep_skb_list);
+ c4iw_put_wr_wait(ep->com.wr_waitp);
kfree(ep);
}
@@ -456,6 +460,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb_reset_transport_header(skb);
} else {
skb = alloc_skb(len, gfp);
+ if (!skb)
+ return NULL;
}
t4_set_arp_err_handler(skb, NULL, NULL);
return skb;
@@ -489,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
@@ -500,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
@@ -554,7 +558,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
@@ -570,7 +574,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
struct c4iw_rdev *rdev = &ep->com.dev->rdev;
struct cpl_abort_req *req = cplhdr(skb);
- pr_debug("%s rdev %p\n", __func__, rdev);
+ pr_debug("rdev %p\n", rdev);
req->cmd = CPL_ABORT_NO_RST;
skb_get(skb);
ret = c4iw_ofld_send(rdev, skb);
@@ -585,24 +589,29 @@ static int send_flowc(struct c4iw_ep *ep)
{
struct fw_flowc_wr *flowc;
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
- int i;
u16 vlan = ep->l2t->vlan;
int nparams;
+ int flowclen, flowclen16;
if (WARN_ON(!skb))
return -ENOMEM;
if (vlan == CPL_L2T_VLAN_NONE)
- nparams = 8;
- else
nparams = 9;
+ else
+ nparams = 10;
+
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
- flowc = __skb_put(skb, FLOWC_LEN);
+ flowc = __skb_put(skb, flowclen);
+ memset(flowc, 0, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
- 16)) | FW_WR_FLOWID_V(ep->hwtid));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
@@ -621,21 +630,13 @@ static int send_flowc(struct c4iw_ep *ep)
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
- if (nparams == 9) {
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
+ if (nparams == 10) {
u16 pri;
-
pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
- flowc->mnemval[8].val = cpu_to_be32(pri);
- } else {
- /* Pad WR to 16 byte boundary */
- flowc->mnemval[8].mnemonic = 0;
- flowc->mnemval[8].val = 0;
- }
- for (i = 0; i < 9; i++) {
- flowc->mnemval[i].r4[0] = 0;
- flowc->mnemval[i].r4[1] = 0;
- flowc->mnemval[i].r4[2] = 0;
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[9].val = cpu_to_be32(pri);
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
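
With the receive-scale mnemonic always sent, the flowc work request length is
now computed from nparams and rounded up to the mandatory 16-byte multiple,
replacing the fixed FLOWC_LEN and the explicit pad entry. Worked through,
assuming the 8-byte fw_flowc_wr header and 8-byte fw_flowc_mnemval entries
from t4fw_api.h:

    nparams = 9:  flowclen = 8 + 9*8  = 80; flowclen16 = 5; flowclen = 80
    nparams = 10: flowclen = 8 + 10*8 = 88; flowclen16 = 6; flowclen = 96

so the vlan case carries 8 bytes of padding, already zeroed by the memset().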
@@ -647,7 +648,7 @@ static int send_halfclose(struct c4iw_ep *ep)
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
if (WARN_ON(!skb))
return -ENOMEM;
@@ -657,12 +658,38 @@ static int send_halfclose(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-static int send_abort(struct c4iw_ep *ep)
+static void read_tcb(struct c4iw_ep *ep)
+{
+ struct sk_buff *skb;
+ struct cpl_get_tcb *req;
+ int wrlen = roundup(sizeof(*req), 16);
+
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (WARN_ON(!skb))
+ return;
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
+ req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
+
+ /*
+ * keep a ref on the ep so the tcb is not unlocked before this
+ * cpl completes. The ref is released in read_tcb_rpl().
+ */
+ c4iw_get_ep(&ep->com);
+ if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
+ c4iw_put_ep(&ep->com);
+}
+
+static int send_abort_req(struct c4iw_ep *ep)
{
u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
if (WARN_ON(!req_skb))
return -ENOMEM;
@@ -672,6 +699,17 @@ static int send_abort(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
+static int send_abort(struct c4iw_ep *ep)
+{
+ if (!ep->com.qp || !ep->com.qp->srq) {
+ send_abort_req(ep);
+ return 0;
+ }
+ set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ return 0;
+}
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req = NULL;
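
send_abort() now forks on whether the QP is SRQ-backed: the plain path issues
the abort immediately, while the SRQ path must first snapshot the TCB so any
cached SRQ buffer index survives the teardown. A sketch of the resulting flow
(the handlers appear later in this patch):

    send_abort(ep)
        -> set ABORT_REQ_IN_PROGRESS, read_tcb(ep)      /* CPL_GET_TCB */
    read_tcb_rpl()
        -> recover ep->srqe_idx from the TCB, then
           send_abort_req(ep)                           /* CPL_ABORT_REQ */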
@@ -696,7 +734,7 @@ static int send_connect(struct c4iw_ep *ep)
&ep->com.remote_addr;
int ret;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
struct net_device *netdev;
u64 params;
@@ -725,7 +763,7 @@ static int send_connect(struct c4iw_ep *ep)
roundup(sizev4, 16) :
roundup(sizev6, 16);
- pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);
+ pr_debug("ep %p atid %u\n", ep, ep->atid);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@ -824,13 +862,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t5req->rsvd);
+ pr_debug("snd_isn %u\n", t5req->rsvd);
t5req->opt2 = cpu_to_be32(opt2);
} else {
t6req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t6req->rsvd);
+ pr_debug("snd_isn %u\n", t6req->rsvd);
t6req->opt2 = cpu_to_be32(opt2);
}
}
@@ -877,13 +915,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req6->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t5req6->rsvd);
+ pr_debug("snd_isn %u\n", t5req6->rsvd);
t5req6->opt2 = cpu_to_be32(opt2);
} else {
t6req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req6->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t6req6->rsvd);
+ pr_debug("snd_isn %u\n", t6req6->rsvd);
t6req6->opt2 = cpu_to_be32(opt2);
}
@@ -907,15 +945,13 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
struct mpa_message *mpa;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
-
- BUG_ON(skb_cloned(skb));
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + ep->plen;
if (mpa_rev_to_use == 2)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(skb, wrlen, GFP_KERNEL);
if (!skb) {
connect_reply_upcall(ep, -ENOMEM);
@@ -959,9 +995,10 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (mpa_rev_to_use == 2) {
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
- pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
+ pr_debug("initiator ird %u ord %u\n", ep->ird,
ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -994,7 +1031,6 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
*/
skb_get(skb);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
if (ret)
@@ -1014,13 +1050,13 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@ -1051,8 +1087,9 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons(((u16)ep->ird) |
(peer2peer ? MPA_V2_PEER2PEER_MODEL :
0));
@@ -1080,7 +1117,6 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb_get(skb);
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ep->snd_seq += mpalen;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1094,13 +1130,13 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@ -1135,8 +1171,9 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
if (peer2peer && (ep->mpa_attr.p2p_type !=
@@ -1178,14 +1215,17 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
+ unsigned short tcp_opt = ntohs(req->tcp_opt);
unsigned int tid = GET_TID(req);
unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
int ret;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
- pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
mutex_lock(&ep->com.mutex);
@@ -1198,11 +1238,12 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
/* dealloc the atid */
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
set_bit(ACT_ESTAB, &ep->com.history);
@@ -1229,7 +1270,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = status;
@@ -1246,7 +1287,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_DISCONNECT;
if (ep->com.cm_id) {
@@ -1261,7 +1302,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = -ECONNRESET;
@@ -1278,8 +1319,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u status %d\n",
- __func__, ep, ep->hwtid, status);
+ pr_debug("ep %p tid %u status %d\n",
+ ep, ep->hwtid, status);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
@@ -1308,7 +1349,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
}
}
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
+ pr_debug("ep %p tid %u status %d\n", ep,
ep->hwtid, status);
set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -1322,7 +1363,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
struct iw_cm_event event;
int ret;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1359,13 +1400,13 @@ static void established_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
event.ird = ep->ord;
event.ord = ep->ird;
if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(ESTAB_UPCALL, &ep->com.history);
}
@@ -1377,8 +1418,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
u32 credit_dack;
- pr_debug("%s ep %p tid %u credits %u\n",
- __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
pr_err("update_rx_credits - cannot alloc skb!\n");
@@ -1429,7 +1470,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
int err;
int disconnect = 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1527,8 +1568,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
- pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
- __func__,
+ pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
resp_ird, resp_ord, ep->ird, ep->ord);
/*
@@ -1573,8 +1613,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
- __func__, ep->mpa_attr.crc_enabled,
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+ ep->mpa_attr.crc_enabled,
ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type, p2p_type);
@@ -1670,7 +1710,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1679,7 +1719,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
goto err_stop_timer;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
/*
* Copy the new data into our accumulation buffer.
@@ -1695,7 +1735,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len < sizeof(*mpa))
return 0;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
@@ -1758,8 +1798,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
ep->ord = min_t(u32, ep->ord,
cur_max_read_depth(ep->com.dev));
- pr_debug("%s initiator ird %u ord %u\n",
- __func__, ep->ird, ep->ord);
+ pr_debug("initiator ird %u ord %u\n",
+ ep->ird, ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
if (ntohs(mpa_v2_params->ord) &
@@ -1776,8 +1816,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
- __func__,
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type);
@@ -1816,7 +1855,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
@@ -1836,7 +1875,6 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
update_rx_credits(ep, dlen);
- BUG_ON(!ep->com.qp);
if (status)
pr_err("%s Unexpected streaming data." \
" qpid %u ep %p state %d tid %u status %d\n",
@@ -1858,10 +1896,31 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
+{
+ enum chip_type adapter_type;
+
+ adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+
+ /*
+ * If this TCB had a srq buffer cached, then we must complete
+ * it. For user mode, that means saving the srqidx in the
+ * user/kernel status page for this qp. For kernel mode, just
+ * synthesize the CQE now.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
+ if (ep->com.qp->ibqp.uobject)
+ t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
+ else
+ c4iw_flush_srqidx(ep->com.qp, srqidx);
+ }
+}
+
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
+ u32 srqidx;
struct c4iw_ep *ep;
- struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
int release = 0;
unsigned int tid = GET_TID(rpl);
@@ -1870,11 +1929,17 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
+ complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
+ }
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
__state_set(&ep->com, DEAD);
release = 1;
break;
@@ -1884,8 +1949,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- if (release)
+ if (release) {
+ close_complete_upcall(ep, -ECONNRESET);
release_ep_resources(ep);
+ }
c4iw_put_ep(&ep->com);
return 0;
}
@@ -1900,6 +1967,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
@@ -1994,8 +2064,8 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
ep->rcv_win = rcv_win;
- pr_debug("%s snd_win %d rcv_win %d\n",
- __func__, ep->snd_win, ep->rcv_win);
+ pr_debug("snd_win %d rcv_win %d\n",
+ ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
@@ -2016,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENOMEM;
if (n->dev->flags & IFF_LOOPBACK) {
if (iptype == 4)
- pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+ pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
else if (IS_ENABLED(CONFIG_IPV6))
for_each_netdev(&init_net, pdev) {
if (ipv6_chk_addr(&init_net,
@@ -2031,16 +2101,15 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENODEV;
goto out;
}
+ if (is_vlan_dev(pdev))
+ pdev = vlan_dev_real_dev(pdev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, rt_tos2priority(tos));
- if (!ep->l2t) {
- dev_put(pdev);
+ if (!ep->l2t)
goto out;
- }
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
- cxgb4_port_viid(pdev));
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -2050,17 +2119,15 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
- dev_put(pdev);
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = dst_mtu(dst);
ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
- cxgb4_port_viid(pdev));
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -2100,9 +2167,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
int iptype;
__u8 *ra;
- pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
/* When MPA revision is different on nodes, the node with MPA_rev=2
* tries to reconnect with MPA_rev 1 for the same EP through
@@ -2110,7 +2176,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
* further connection establishment. As we are using the same EP pointer
* for reconnect, few skbs are used during the previous c4iw_connect(),
* which leaves the EP with inadequate skbs for further
- * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+ * c4iw_reconnect(), further causing a crash due to an empty
* skb_list() during peer_abort(). Allocate skbs which is already used.
*/
size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
@@ -2128,7 +2194,9 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
err = -ENOMEM;
goto fail2;
}
- insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+ err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
+ if (err)
+ goto fail2a;
/* find a route */
if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
@@ -2145,7 +2213,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port,
+ ep->com.cm_id->tos,
raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -2163,8 +2232,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
goto fail4;
}
- pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
@@ -2179,7 +2248,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
fail4:
dst_release(ep->dst);
fail3:
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
+fail2a:
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
/*
@@ -2210,17 +2280,20 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
int ret = 0;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
+
la = (struct sockaddr_in *)&ep->com.local_addr;
ra = (struct sockaddr_in *)&ep->com.remote_addr;
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
- pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
status, status2errno(status));
if (cxgb_is_neg_adv(status)) {
- pr_debug("%s Connection problems for atid %u status %u (%s)\n",
- __func__, atid, status, neg_adv_str(status));
+ pr_debug("Connection problems for atid %u status %u (%s)\n",
+ atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -2262,8 +2335,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
(const u32 *)
&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
- atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2300,7 +2372,7 @@ fail:
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
ep->com.local_addr.ss_family);
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
+ xa_erase_irq(&ep->com.dev->atids, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2316,12 +2388,12 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
+ pr_debug("ep %p status %d error %d\n", ep,
rpl->status, status2errno(rpl->status));
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
out:
return 0;
@@ -2333,9 +2405,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_TID(rpl);
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
- pr_debug("%s ep %p\n", __func__, ep);
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ if (!ep) {
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
+ }
+ pr_debug("ep %p\n", ep);
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
+out:
return 0;
}
@@ -2351,22 +2428,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
int win;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(skb_cloned(skb));
-
- skb_get(skb);
- rpl = cplhdr(skb);
- if (!is_t4(adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
enable_tcp_timestamps && req->tcpopt.tstamp,
(ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2412,17 +2474,25 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
}
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
- u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ if (!is_t4(adapter_type)) {
+ u32 isn = (get_random_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
- pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+ pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
@@ -2435,8 +2505,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
- pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
- BUG_ON(skb_cloned(skb));
+ pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
skb_trim(skb, sizeof(struct cpl_tid_release));
release_tid(&dev->rdev, hwtid, skb);
return;
@@ -2457,40 +2526,45 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
- u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ u8 tos;
parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
- pr_debug("%s connect request on invalid stid %d\n",
- __func__, stid);
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
if (state_read(&parent_ep->com) != LISTEN) {
- pr_debug("%s - listening ep not in LISTEN\n", __func__);
+ pr_err("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
+ if (parent_ep->com.cm_id->tos_set)
+ tos = parent_ep->com.cm_id->tos;
+ else
+ tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+
cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
&iptype, local_ip, peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
- pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
+ pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
*(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port, tos);
} else {
- pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
+ pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ tos,
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@@ -2571,10 +2645,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
- init_timer(&child_ep->timer);
+ timer_setup(&child_ep->timer, ep_timeout, 0);
cxgb4_insert_tid(t, child_ep, hwtid,
child_ep->com.local_addr.ss_family);
insert_ep_tid(child_ep);
@@ -2594,9 +2668,9 @@ fail:
c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
+out:
if (parent_ep)
c4iw_put_ep(&parent_ep->com);
-out:
return 0;
}
@@ -2606,16 +2680,20 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_pass_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
int ret;
+ u16 tcp_opt = ntohs(req->tcp_opt);
ep = get_ep_from_tid(dev, tid);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ if (!ep)
+ return 0;
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
- ntohs(req->tcp_opt));
+ pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
dst_confirm(ep->dst);
mutex_lock(&ep->com.mutex);
@@ -2645,7 +2723,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
dst_confirm(ep->dst);
set_bit(PEER_CLOSE, &ep->com.history);
@@ -2668,12 +2746,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
*/
__state_set(&ep->com, CLOSING);
pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case FPDU_MODE:
start_ep_timer(ep);
@@ -2709,7 +2787,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
}
mutex_unlock(&ep->com.mutex);
if (disconnect)
@@ -2720,32 +2798,52 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
+{
+ complete_cached_srq_buffers(ep, ep->srqe_idx);
+ if (ep->com.cm_id && ep->com.qp) {
+ struct c4iw_qp_attributes attrs;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ peer_abort_upcall(ep);
+ release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
+}
+
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_req_rss6 *req = cplhdr(skb);
struct c4iw_ep *ep;
struct sk_buff *rpl_skb;
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
unsigned int tid = GET_TID(req);
+ u8 status;
+ u32 srqidx;
+
u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- if (cxgb_is_neg_adv(req->status)) {
- pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
+
+ if (cxgb_is_neg_adv(status)) {
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, status, neg_adv_str(status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
goto deref_ep;
}
- pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -2755,7 +2853,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* MPA_REQ_SENT
*/
if (ep->com.state != MPA_REQ_SENT)
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
@@ -2767,7 +2865,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MPA_REQ_SENT:
(void)stop_ep_timer(ep);
- if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
+ if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 ||
+ (mpa_rev == 2 && ep->tried_with_mpa_v1))
connect_reply_upcall(ep, -ECONNRESET);
else {
/*
@@ -2778,8 +2877,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* do some housekeeping so as to re-initiate the
* connection
*/
- pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
- __func__, mpa_rev);
+ pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
+ __func__, mpa_rev);
ep->retry_with_mpa_v1 = 1;
}
break;
@@ -2790,8 +2889,24 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case MORIBUND:
case CLOSING:
stop_ep_timer(ep);
- /*FALLTHROUGH*/
+ fallthrough;
case FPDU_MODE:
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(
+ be32_to_cpu(req->srqidx_status));
+ if (srqidx) {
+ complete_cached_srq_buffers(ep, srqidx);
+ } else {
+ /* Hold ep ref until finish_peer_abort() */
+ c4iw_get_ep(&ep->com);
+ __state_set(&ep->com, ABORTING);
+ set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ break;
+
+ }
+ }
+
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_ERROR;
ret = c4iw_modify_qp(ep->com.qp->rhp,
@@ -2805,11 +2920,11 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case ABORTING:
break;
case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
mutex_unlock(&ep->com.mutex);
goto deref_ep;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
dst_confirm(ep->dst);
@@ -2843,7 +2958,7 @@ out:
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
+ xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
ep->com.local_addr.ss_family);
dst_release(ep->dst);
@@ -2870,8 +2985,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(!ep);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/* The cm_id may be null if we failed to connect */
mutex_lock(&ep->com.mutex);
@@ -2897,7 +3011,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case DEAD:
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
mutex_unlock(&ep->com.mutex);
@@ -2915,17 +3029,23 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
ep = get_ep_from_tid(dev, tid);
- BUG_ON(!ep);
- if (ep && ep->com.qp) {
- pr_warn("TERM received tid %u qpid %u\n",
- tid, ep->com.qp->wq.sq.qid);
- attrs.next_state = C4IW_QP_STATE_TERMINATE;
- c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ if (ep) {
+ if (ep->com.qp) {
+ pr_warn("TERM received tid %u qpid %u\n", tid,
+ ep->com.qp->wq.sq.qid);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+
+ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
+ * when entering the TERM state the RNIC MUST initiate a CLOSE.
+ */
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
} else
pr_warn("TERM received tid %u no ep/qp\n", tid);
- c4iw_put_ep(&ep->com);
return 0;
}
@@ -2946,19 +3066,19 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u credits %u\n",
- __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ pr_debug("0 credit ack ep %p tid %u state %u\n",
+ ep, ep->hwtid, state_read(&ep->com));
goto out;
}
dst_confirm(ep->dst);
if (ep->mpa_skb) {
- pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
- __func__, ep, ep->hwtid,
- state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+ ep, ep->hwtid, state_read(&ep->com),
+ ep->mpa_attr.initiator ? 1 : 0);
mutex_lock(&ep->com.mutex);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
@@ -2976,7 +3096,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
int abort;
struct c4iw_ep *ep = to_ep(cm_id);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3007,7 +3127,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
int abort = 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3015,7 +3135,10 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err_out;
}
- BUG_ON(!qp);
+ if (!qp) {
+ err = -EINVAL;
+ goto err_out;
+ }
set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
@@ -3060,7 +3183,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ird = 1;
}
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
@@ -3115,17 +3238,22 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
int found = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ const struct in_ifaddr *ifa;
ind = in_dev_get(dev->rdev.lldi.ports[0]);
if (!ind)
return -EADDRNOTAVAIL;
- for_primary_ifa(ind) {
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, ind) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ continue;
laddr->sin_addr.s_addr = ifa->ifa_address;
raddr->sin_addr.s_addr = ifa->ifa_address;
found = 1;
break;
}
- endfor_ifa(ind);
+ rcu_read_unlock();
+
in_dev_put(ind);
return found ? 0 : -EADDRNOTAVAIL;
}
@@ -3158,7 +3286,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
- struct in6_addr uninitialized_var(addr);
+ struct in6_addr addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
@@ -3200,7 +3328,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail1;
}
- init_timer(&ep->timer);
+ timer_setup(&ep->timer, ep_timeout, 0);
ep->plen = conn_param->private_data_len;
if (ep->plen)
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
@@ -3213,15 +3341,16 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
+ cm_id->provider_data = ep;
ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
- pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+ pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
goto fail2;
}
ref_qp(ep);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
ep->com.qp, cm_id);
/*
@@ -3233,7 +3362,9 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto fail2;
}
- insert_handle(dev, &dev->atid_idr, ep, ep->atid);
+ err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
+ if (err)
+ goto fail5;
memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
@@ -3255,12 +3386,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
- pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
- __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+ pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+ &laddr->sin_addr, ntohs(laddr->sin_port),
ra, ntohs(raddr->sin_port));
ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
laddr->sin_addr.s_addr,
@@ -3277,19 +3408,19 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
- pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
- __func__, laddr6->sin6_addr.s6_addr,
+ pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+ laddr6->sin6_addr.s6_addr,
ntohs(laddr6->sin6_port),
raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port, cm_id->tos,
raddr6->sin6_scope_id);
}
if (!ep->dst) {
@@ -3305,8 +3436,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail4;
}
- pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
@@ -3321,7 +3452,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
fail4:
dst_release(ep->dst);
fail3:
- remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ xa_erase_irq(&ep->com.dev->atids, ep->atid);
+fail5:
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
skb_queue_purge(&ep->com.ep_skb_list);
@@ -3344,14 +3476,14 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
if (err)
return err;
}
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
ep->stid, &sin6->sin6_addr,
sin6->sin6_port,
ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3387,13 +3519,13 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
}
} while (err == -EBUSY);
} else {
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
ep->stid, sin->sin_addr.s_addr, sin->sin_port,
0, ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3420,7 +3552,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail1;
}
skb_queue_head_init(&ep->com.ep_skb_list);
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
ep->com.dev = dev;
@@ -3444,10 +3576,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
err = -ENOMEM;
goto fail2;
}
- insert_handle(dev, &dev->stid_idr, ep, ep->stid);
-
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
+ err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
+ if (err)
+ goto fail3;
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
@@ -3458,7 +3589,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
-
+ xa_erase_irq(&ep->com.dev->stids, ep->stid);
+fail3:
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
@@ -3474,7 +3606,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int err;
struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
might_sleep();
state_set(&ep->com, DEAD);
@@ -3482,22 +3614,23 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.local_addr.ss_family == AF_INET) {
err = cxgb4_remove_server_filter(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ ep->com.dev->rdev.lldi.rxq_ids[0], false);
} else {
struct sockaddr_in6 *sin6;
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ ep->com.local_addr.ss_family == AF_INET6);
if (err)
goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
0, 0, __func__);
sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
+ xa_erase_irq(&ep->com.dev->stids, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
done:
@@ -3515,7 +3648,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
mutex_lock(&ep->com.mutex);
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ pr_debug("ep %p state %s, abrupt %d\n", ep,
states[ep->com.state], abrupt);
/*
@@ -3569,18 +3702,17 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("ignoring disconnect ep %p state %u\n",
+ ep, ep->com.state);
break;
default:
- BUG();
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
- close_complete_upcall(ep, -ECONNRESET);
ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
@@ -3632,6 +3764,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
send_fw_act_open_req(ep, atid);
return;
}
+ fallthrough;
case FW_EADDRINUSE:
set_bit(ACT_RETRY_INUSE, &ep->com.history);
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
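
The bare fallthrough; statement added above is the kernel's pseudo-keyword (from <linux/compiler_attributes.h>) marking an intentional drop into the next case label, which keeps -Wimplicit-fallthrough builds quiet. Illustrative shape only, with invented helpers:

switch (retval) {
case FW_ENOMEM:
        if (example_can_retry(ep))
                break;
        fallthrough;    /* out of retries: treat like address-in-use */
case FW_EADDRINUSE:
        example_handle_addr_in_use(ep);
        break;
}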
@@ -3657,7 +3790,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
- remove_handle(dev, &dev->atid_idr, atid);
+ xa_erase_irq(&dev->atids, atid);
cxgb4_free_atid(dev->rdev.lldi.tids, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -3672,9 +3805,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
int ret;
rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
- BUG_ON(!rpl_skb);
if (req->retval) {
- pr_debug("%s passive open failure %d\n", __func__, req->retval);
+ pr_err("%s passive open failure %d\n", __func__, req->retval);
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.pas_ofld_conn_fails++;
mutex_unlock(&dev->rdev.stats.lock);
@@ -3691,6 +3823,80 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
return;
}
+static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
+{
+ u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
+ u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
+ u64 t;
+ u32 shift = 32;
+
+ t = (thi << shift) | (tlo >> shift);
+
+ return t;
+}
+
+static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
+{
+ u32 v;
+ u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
+
+ if (word & 0x1)
+ shift += 32;
+ v = (t >> shift) & mask;
+ return v;
+}
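+
Both helpers decode the same layout: the hardware TCB is returned as 16 big-endian 64-bit quantities holding 32 hardware words in reverse order, so word w lives in tcb[(31 - w) / 2] with odd-numbered words in the upper 32 bits; the 64-bit variant stitches the upper half of the containing quadword to the lower half of the preceding one. A user-space-style sanity sketch of the 32-bit extraction (layout assumed from the code above, names invented):

#include <stdint.h>
#include <endian.h>

static uint32_t example_tcb_get32(const uint64_t *tcb_be, uint16_t word,
                                  uint32_t mask, uint32_t shift)
{
        uint64_t t = be64toh(tcb_be[(31 - word) / 2]);

        if (word & 0x1)         /* odd words sit in the upper half */
                shift += 32;
        return (t >> shift) & mask;
}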
+
+static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
+ __be64 *tcb = (__be64 *)(rpl + 1);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ u64 t_flags_64;
+ u32 rx_pdu_out;
+
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+ /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
+ * determine if there's an Rx PDU feedback event pending.
+ *
+ * If that bit is set, it means we'll need to re-read the TCB's
+ * rq_start value. The final value is the one present in a TCB
+ * with the TF_RX_PDU_OUT bit cleared.
+ */
+
+ t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
+ rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
+
+ c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
+ c4iw_put_ep(&ep->com); /* from read_tcb() */
+
+ /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
+ if (rx_pdu_out) {
+ if (++ep->rx_pdu_out_cnt >= 2) {
+ WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
+ goto cleanup;
+ }
+ read_tcb(ep);
+ return 0;
+ }
+
+ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+ TCB_RQ_START_S);
+cleanup:
+ pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
+
+ if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
+ finish_peer_abort(dev, ep);
+ else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
+ send_abort_req(ep);
+ else
+ WARN_ONCE(1, "unexpected state!");
+
+ return 0;
+}
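+
read_tcb_rpl() re-issues the TCB read while the transient TF_RX_PDU_OUT flag is set, but at most once more, so a flag that never clears cannot stall the abort cleanup. A self-contained sketch of that bounded-retry shape (all names invented):

#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_REREAD_LIMIT    2

struct example_ep {
        int reread_cnt;
};

bool example_flag_pending(struct example_ep *ep);
void example_issue_reread(struct example_ep *ep);

static int example_read_until_stable(struct example_ep *ep)
{
        if (example_flag_pending(ep)) {
                if (++ep->reread_cnt >= EXAMPLE_REREAD_LIMIT)
                        return -ETIMEDOUT;      /* give up, clean up anyway */
                example_issue_reread(ep);       /* the reply re-enters here */
                return -EINPROGRESS;
        }
        return 0;                               /* the value is final */
}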
+
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
@@ -3870,7 +4076,6 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct net_device *pdev;
u16 rss_qid, eth_hdr_len;
int step;
- u32 tx_chan;
struct neighbour *neigh;
/* Drop all non-SYN packets */
@@ -3891,8 +4096,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) {
- pr_debug("%s connect request on invalid stid %d\n",
- __func__, stid);
+ pr_warn("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
@@ -3918,7 +4123,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
} else {
vlan_eh = (struct vlan_ethhdr *)(req + 1);
iph = (struct iphdr *)(vlan_eh + 1);
- skb->vlan_tci = ntohs(cpl->vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
}
if (iph->version != 0x4)
@@ -3929,7 +4134,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb_get(skb);
- pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos);
@@ -3937,31 +4142,31 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
iph->daddr, iph->saddr, tcph->dest,
tcph->source, iph->tos);
if (!dst) {
- pr_err("%s - failed to find dst entry!\n",
- __func__);
+ pr_err("%s - failed to find dst entry!\n", __func__);
goto reject;
}
neigh = dst_neigh_lookup_skb(dst, skb);
if (!neigh) {
- pr_err("%s - failed to allocate neigh!\n",
- __func__);
+ pr_err("%s - failed to allocate neigh!\n", __func__);
goto free_dst;
}
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, iph->daddr);
+ if (!pdev) {
+ pr_err("%s - failed to find device!\n", __func__);
+ goto free_dst;
+ }
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
dev_put(pdev);
} else {
pdev = get_real_dev(neigh->dev);
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
}
neigh_release(neigh);
if (!e) {
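
The loopback branch gains a missing NULL check: ip_dev_find() can fail, and when it succeeds it returns the device with a reference held, which is why the existing dev_put() follows once the port info has been read. The contract in isolation:

struct net_device *pdev;

pdev = ip_dev_find(&init_net, iph->daddr);
if (!pdev)
        goto free_dst;          /* no local device owns this address */
/* ... resolve the L2T entry and read netdev_priv(pdev) ... */
dev_put(pdev);                  /* balance ip_dev_find()'s reference */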
@@ -4016,6 +4221,7 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
+ [CPL_GET_TCB_RPL] = read_tcb_rpl,
[CPL_FW6_MSG] = deferred_fw6_msg,
[CPL_RX_PKT] = rx_pkt,
[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
@@ -4028,8 +4234,7 @@ static void process_timeout(struct c4iw_ep *ep)
int abort = 1;
mutex_lock(&ep->com.mutex);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
@@ -4105,19 +4310,24 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- BUG_ON(!work_handlers[opcode]);
- ret = work_handlers[opcode](dev, skb);
- if (!ret)
+ if (opcode >= ARRAY_SIZE(work_handlers) ||
+ !work_handlers[opcode]) {
+ pr_err("No handler for opcode 0x%x.\n", opcode);
kfree_skb(skb);
+ } else {
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
process_timedout_eps();
}
}
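
process_work() no longer trusts the opcode: rather than BUG_ON for a missing handler, it bounds-checks against the table and drops unknown CPLs with an error message. A generic form of that defensive dispatch, reusing the c4iw_handler_func type from this file and assuming the driver's convention that a nonzero return means the handler kept the skb:

static void example_dispatch(c4iw_handler_func *table, size_t nr,
                             u8 opcode, struct c4iw_dev *dev,
                             struct sk_buff *skb)
{
        if (opcode >= nr || !table[opcode]) {
                pr_err("No handler for opcode 0x%x.\n", opcode);
                kfree_skb(skb);
                return;
        }
        if (!table[opcode](dev, skb))
                kfree_skb(skb); /* handler did not take ownership */
}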
static DECLARE_WORK(skb_work, process_work);
-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
{
- struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+ struct c4iw_ep *ep = timer_container_of(ep, t, timer);
int kickit = 0;
spin_lock(&timeout_lock);
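
ep_timeout() is converted to the timer_list API: the callback receives the timer pointer and recovers its container with timer_container_of(). The counterpart is a one-time timer_setup() when the endpoint is created, replacing the old pattern of assigning .data and .function by hand before each add_timer(). A sketch, assuming that init site:

static void example_timeout(struct timer_list *t)
{
        struct c4iw_ep *ep = timer_container_of(ep, t, timer);

        /* queue ep on timeout_list and kick the workqueue ... */
}

static void example_ep_init(struct c4iw_ep *ep)
{
        timer_setup(&ep->timer, example_timeout, 0);
}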
@@ -4172,15 +4382,15 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_wr_wait *wr_waitp;
int ret;
- pr_debug("%s type %u\n", __func__, rpl->type);
+ pr_debug("type %u\n", rpl->type);
switch (rpl->type) {
case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
- pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
if (wr_waitp)
- c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+ c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
case FW6_TYPE_CQE:
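
The wr_waitp wakeups are split into two variants here: c4iw_wake_up_deref() presumably signals the waiter and also drops the reference the posting path took on the now refcounted wait object, while the c4iw_wake_up_noref() call in peer_abort_intr() below wakes without consuming a reference, since the endpoint still owns its wr_waitp.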
@@ -4210,15 +4420,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
- pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
out:
sched(dev, skb);
return 0;
@@ -4244,13 +4453,13 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_GET_TCB_RPL] = sched,
[CPL_FW6_MSG] = fw6_msg,
[CPL_RX_PKT] = sched
};
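
Note the two-level dispatch: this ingress-context table sends CPL_GET_TCB_RPL through sched(), which defers the skb to the iw_cxgb4 workqueue, and the work_handlers table added earlier then routes it to read_tcb_rpl(), keeping the TCB parsing and endpoint locking out of hard-irq context.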
int __init c4iw_cm_init(void)
{
- spin_lock_init(&timeout_lock);
skb_queue_head_init(&rxq);
workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
@@ -4263,6 +4472,5 @@ int __init c4iw_cm_init(void)
void c4iw_cm_term(void)
{
WARN_ON(!list_empty(&timeout_list));
- flush_workqueue(workq);
destroy_workqueue(workq);
}
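
Dropping the explicit flush is safe because destroy_workqueue() already drains all pending work before freeing the workqueue; likewise, the spin_lock_init() call removed above becomes unnecessary once timeout_lock is statically initialized.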