// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode);

static inline void retry_first_write_send(struct rxe_qp *qp,
                                          struct rxe_send_wqe *wqe, int npsn)
{
        int i;

        for (i = 0; i < npsn; i++) {
                int to_send = (wqe->dma.resid > qp->mtu) ?
                                qp->mtu : wqe->dma.resid;

                qp->req.opcode = next_opcode(qp, wqe,
                                             wqe->wr.opcode);

                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        wqe->dma.resid -= to_send;
                        wqe->dma.sge_offset += to_send;
                } else {
                        advance_dma_data(&wqe->dma, to_send);
                }
        }
}

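/* req_retry - rewind the send queue after a retry has been requested.
 * The requester restarts from the completer's PSN (qp->comp.psn) and from
 * the oldest un-acked wqe (the queue consumer index). Each wqe that has not
 * completed gets its iova and dma state reset so it can be resent starting
 * at the first un-acked packet.
 */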
static void req_retry(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe;
        unsigned int wqe_index;
        unsigned int mask;
        int npsn;
        int first = 1;
        struct rxe_queue *q = qp->sq.queue;
        unsigned int cons;
        unsigned int prod;

        cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
        prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

        qp->req.wqe_index = cons;
        qp->req.psn = qp->comp.psn;
        qp->req.opcode = -1;

        for (wqe_index = cons; wqe_index != prod;
             wqe_index = queue_next_index(q, wqe_index)) {
                wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
                mask = wr_opcode_mask(wqe->wr.opcode, qp);

                if (wqe->state == wqe_state_posted)
                        break;

                if (wqe->state == wqe_state_done)
                        continue;

                wqe->iova = (mask & WR_ATOMIC_MASK) ?
                             wqe->wr.wr.atomic.remote_addr :
                             (mask & WR_READ_OR_WRITE_MASK) ?
                             wqe->wr.wr.rdma.remote_addr :
                             0;

                if (!first || (mask & WR_READ_MASK) == 0) {
                        wqe->dma.resid = wqe->dma.length;
                        wqe->dma.cur_sge = 0;
                        wqe->dma.sge_offset = 0;
                }

                if (first) {
                        first = 0;

                        if (mask & WR_WRITE_OR_SEND_MASK) {
                                npsn = (qp->comp.psn - wqe->first_psn) &
                                        BTH_PSN_MASK;
                                retry_first_write_send(qp, wqe, npsn);
                        }

                        if (mask & WR_READ_MASK) {
                                npsn = (wqe->dma.length - wqe->dma.resid) /
                                        qp->mtu;
                                wqe->iova += npsn * qp->mtu;
                        }
                }

                wqe->state = wqe_state_posted;
        }
}

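/* rnr_nak_timer - runs when the RNR NAK backoff timer expires. It does not
 * retry directly; it marks the qp as needing a retry and reschedules the
 * requester task, which then performs the retry via req_retry().
 */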
void rnr_nak_timer(struct timer_list *t)
{
        struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

        rxe_dbg_qp(qp, "nak timer fired\n");

        /* request a send queue retry */
        qp->req.need_retry = 1;
        qp->req.wait_for_rnr_timer = 0;
        rxe_sched_task(&qp->req.task);
}

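/* req_next_wqe - return the next wqe to process or NULL if none is ready.
 * Also detects the SQD (drain) transition: once every wqe ahead of the
 * producer has been retired the qp moves to QP_STATE_DRAINED and an
 * IB_EVENT_SQ_DRAINED event is raised to the consumer.
 */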
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe;
        struct rxe_queue *q = qp->sq.queue;
        unsigned int index = qp->req.wqe_index;
        unsigned int cons;
        unsigned int prod;

        wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
        cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
        prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* check to see if we are drained;
                 * state_lock used by requester and completer
                 */
                spin_lock_bh(&qp->state_lock);
                do {
                        if (qp->req.state != QP_STATE_DRAIN) {
                                /* comp just finished */
                                spin_unlock_bh(&qp->state_lock);
                                break;
                        }

                        if (wqe && ((index != cons) ||
                                    (wqe->state != wqe_state_posted))) {
                                /* comp not done yet */
                                spin_unlock_bh(&qp->state_lock);
                                break;
                        }

                        qp->req.state = QP_STATE_DRAINED;
                        spin_unlock_bh(&qp->state_lock);

                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;

                                ev.device = qp->ibqp.device;
                                ev.element.qp = &qp->ibqp;
                                ev.event = IB_EVENT_SQ_DRAINED;
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
                } while (0);
        }

        if (index == prod)
                return NULL;

        wqe = queue_addr_from_index(q, index);

        if (unlikely((qp->req.state == QP_STATE_DRAIN ||
                      qp->req.state == QP_STATE_DRAINED) &&
                     (wqe->state != wqe_state_processing)))
                return NULL;

        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the send wqe
 *
 * Returns: 1 if wqe needs to wait
 *          0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        /* Local invalidate fence (LIF) see IBA 10.6.5.1
         * Requires ALL previous operations on the send queue
         * are complete. Make mandatory for the rxe driver.
         */
        if (wqe->wr.opcode == IB_WR_LOCAL_INV)
                return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
                                                QUEUE_TYPE_FROM_CLIENT);

        /* Fence see IBA 10.8.3.3
         * Requires that all previous read and atomic operations
         * are complete.
         */
        return (wqe->wr.send_flags & IB_SEND_FENCE) &&
                atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

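/* next_opcode_rc - map a work request opcode to the RC wire opcode of the
 * next packet. Multi-packet messages use the FIRST/MIDDLE/LAST sequence,
 * single-packet messages use ONLY. For example (illustrative numbers): a
 * 3000 byte RDMA WRITE over a 1024 byte MTU produces WRITE_FIRST,
 * WRITE_MIDDLE and WRITE_LAST, while a 512 byte write produces WRITE_ONLY.
 * 'fits' is true when the remaining payload fits in a single MTU.
 */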
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_RDMA_READ:
                return IB_OPCODE_RC_RDMA_READ_REQUEST;

        case IB_WR_ATOMIC_CMP_AND_SWP:
                return IB_OPCODE_RC_COMPARE_SWAP;

        case IB_WR_ATOMIC_FETCH_AND_ADD:
                return IB_OPCODE_RC_FETCH_ADD;

        case IB_WR_SEND_WITH_INV:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_ATOMIC_WRITE:
                return IB_OPCODE_RC_ATOMIC_WRITE;

        case IB_WR_REG_MR:
        case IB_WR_LOCAL_INV:
                return opcode;
        }

        return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY :
                                IB_OPCODE_UC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_FIRST;
        }

        return -EINVAL;
}

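/* next_opcode - select the wire opcode for the next packet of a wqe based
 * on the qp type. UC supports only sends and RDMA writes; UD and GSI qps
 * send single-packet messages only, so they never use FIRST/MIDDLE/LAST.
 */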
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode)
{
        int fits = (wqe->dma.resid <= qp->mtu);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return next_opcode_rc(qp, opcode, fits);

        case IB_QPT_UC:
                return next_opcode_uc(qp, opcode, fits);

        case IB_QPT_UD:
        case IB_QPT_GSI:
                switch (opcode) {
                case IB_WR_SEND:
                        return IB_OPCODE_UD_SEND_ONLY;

                case IB_WR_SEND_WITH_IMM:
                        return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                }
                break;

        default:
                break;
        }

        return -EINVAL;
}

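/* check_init_depth - reserve an initiator-depth credit before emitting a
 * read, atomic or atomic write request. qp->req.rd_atomic counts the
 * remaining outstanding-request credits (initialized from max_rd_atomic);
 * if none is left the decrement is undone and the requester backs off
 * until a response returns a credit.
 */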
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        int depth;

        if (wqe->has_rd_atomic)
                return 0;

        qp->req.need_rd_atomic = 1;
        depth = atomic_dec_return(&qp->req.rd_atomic);

        if (depth >= 0) {
                qp->req.need_rd_atomic = 0;
                wqe->has_rd_atomic = 1;
                return 0;
        }

        atomic_inc(&qp->req.rd_atomic);
        return -EAGAIN;
}

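/* get_mtu - connected qps (RC/UC) use the path MTU negotiated for the qp;
 * datagram qps (UD/GSI) are limited only by the port MTU cap.
 */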
static inline int get_mtu(struct rxe_qp *qp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
                return qp->mtu;

        return rxe->port.mtu_cap;
}

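/* init_req_packet - allocate the skb and build the IBA headers for one
 * request packet: the BTH plus any optional RETH/IMMDT/IETH/ATMETH/DETH
 * headers selected by the opcode mask. pad = (-payload) & 0x3 rounds the
 * payload up to a 4 byte boundary, e.g. a 5 byte payload gets 3 pad bytes.
 */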
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
                                       struct rxe_av *av,
                                       struct rxe_send_wqe *wqe,
                                       int opcode, u32 payload,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        struct rxe_send_wr *ibwr = &wqe->wr;
        int pad = (-payload) & 0x3;
        int paylen;
        int solicited;
        u32 qp_num;
        int ack_req;

        /* length from start of bth to end of icrc */
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
        pkt->paylen = paylen;

        /* init skb */
        skb = rxe_init_packet(rxe, av, paylen, pkt);
        if (unlikely(!skb))
                return NULL;

        /* init bth */
        solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
                        (pkt->mask & RXE_END_MASK) &&
                        ((pkt->mask & (RXE_SEND_MASK)) ||
                         (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
                         (RXE_WRITE_MASK | RXE_IMMDT_MASK));

        qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
                                               qp->attr.dest_qp_num;

        ack_req = ((pkt->mask & RXE_END_MASK) ||
                   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
        if (ack_req)
                qp->req.noack_pkts = 0;

        bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
                 ack_req, pkt->psn);

        /* init optional headers */
        if (pkt->mask & RXE_RETH_MASK) {
                reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
                reth_set_va(pkt, wqe->iova);
                reth_set_len(pkt, wqe->dma.resid);
        }

        if (pkt->mask & RXE_IMMDT_MASK)
                immdt_set_imm(pkt, ibwr->ex.imm_data);

        if (pkt->mask & RXE_IETH_MASK)
                ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

        if (pkt->mask & RXE_ATMETH_MASK) {
                atmeth_set_va(pkt, wqe->iova);
                if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
                        atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
                } else {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
                }
                atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
        }

        if (pkt->mask & RXE_DETH_MASK) {
                if (qp->ibqp.qp_num == 1)
                        deth_set_qkey(pkt, GSI_QKEY);
                else
                        deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
                deth_set_sqp(pkt, qp->ibqp.qp_num);
        }

        return skb;
}

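/* finish_packet - fill in the packet payload after the headers are built:
 * inline data is copied directly from the wqe, otherwise copy_data() walks
 * the sge list through the MRs; pad bytes, if any, are zeroed. The ICRC
 * itself is filled in later on the transmit path.
 */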
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
                         struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
                         struct sk_buff *skb, u32 payload)
{
        int err;

        err = rxe_prepare(av, pkt, skb);
        if (err)
                return err;

        if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

                        memcpy(payload_addr(pkt), tmp, payload);

                        wqe->dma.resid -= payload;
                        wqe->dma.sge_offset += payload;
                } else {
                        err = copy_data(qp->pd, 0, &wqe->dma,
                                        payload_addr(pkt), payload,
                                        RXE_FROM_MR_OBJ);
                        if (err)
                                return err;
                }
                if (bth_pad(pkt)) {
                        u8 *pad = payload_addr(pkt) + payload;

                        memset(pad, 0, bth_pad(pkt));
                }
        }

        if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
                memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
                wqe->dma.resid -= payload;
        }

        return 0;
}

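/* update_wqe_state - after the last packet of an RC request the wqe waits
 * in wqe_state_pending for the completer to retire it; for any packet that
 * is not the last one of its request the wqe stays in wqe_state_processing.
 */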
static void update_wqe_state(struct rxe_qp *qp,
                             struct rxe_send_wqe *wqe,
                             struct rxe_pkt_info *pkt)
{
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
}

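/* update_wqe_psn - advance the requester PSN. Reads consume one PSN per
 * expected response packet, so the PSN jumps past the whole message; all
 * other requests consume one PSN per request packet. For example
 * (illustrative numbers): an 8192 byte RDMA READ with a 1024 byte MTU
 * advances qp->req.psn by 8, while each 1024 byte WRITE packet advances it
 * by 1. PSNs wrap at 24 bits (BTH_PSN_MASK).
 */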
static void update_wqe_psn(struct rxe_qp *qp,
                           struct rxe_send_wqe *wqe,
                           struct rxe_pkt_info *pkt,
                           u32 payload)
{
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

        /* handle zero length packet case */
        if (num_pkt == 0)
                num_pkt = 1;

        if (pkt->mask & RXE_START_MASK) {
                wqe->first_psn = qp->req.psn;
                wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
        }

        if (pkt->mask & RXE_READ_MASK)
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

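/* save_state/rollback_state - snapshot and restore the wqe state and PSN
 * around rxe_xmit_packet() so that a failed transmit can be retried later
 * without the completer seeing a half-updated wqe.
 */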
static void save_state(struct rxe_send_wqe *wqe,
                       struct rxe_qp *qp,
                       struct rxe_send_wqe *rollback_wqe,
                       u32 *rollback_psn)
{
        rollback_wqe->state = wqe->state;
        rollback_wqe->first_psn = wqe->first_psn;
        rollback_wqe->last_psn = wqe->last_psn;
        *rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
                           struct rxe_qp *qp,
                           struct rxe_send_wqe *rollback_wqe,
                           u32 rollback_psn)
{
        wqe->state = rollback_wqe->state;
        wqe->first_psn = rollback_wqe->first_psn;
        wqe->last_psn = rollback_wqe->last_psn;
        qp->req.psn = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        qp->req.opcode = pkt->opcode;

        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = queue_next_index(qp->sq.queue,
                                                     qp->req.wqe_index);

        qp->need_req_skb = 0;

        if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
                mod_timer(&qp->retrans_timer,
                          jiffies + qp->qp_timeout_jiffies);
}

static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        u8 opcode = wqe->wr.opcode;
        u32 rkey;
        int ret;

        switch (opcode) {
        case IB_WR_LOCAL_INV:
                rkey = wqe->wr.ex.invalidate_rkey;
                if (rkey_is_mw(rkey))
                        ret = rxe_invalidate_mw(qp, rkey);
                else
                        ret = rxe_invalidate_mr(qp, rkey);

                if (unlikely(ret)) {
                        wqe->status = IB_WC_LOC_QP_OP_ERR;
                        return ret;
                }
                break;
        case IB_WR_REG_MR:
                ret = rxe_reg_fast_mr(qp, wqe);
                if (unlikely(ret)) {
                        wqe->status = IB_WC_LOC_QP_OP_ERR;
                        return ret;
                }
                break;
        case IB_WR_BIND_MW:
                ret = rxe_bind_mw(qp, wqe);
                if (unlikely(ret)) {
                        wqe->status = IB_WC_MW_BIND_ERR;
                        return ret;
                }
                break;
        default:
                rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                return -EINVAL;
        }

        wqe->state = wqe_state_done;
        wqe->status = IB_WC_SUCCESS;
        qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

        /* There is no ack coming for local work requests
         * which can lead to a deadlock. So go ahead and complete
         * it now.
         */
        rxe_sched_task(&qp->comp.task);

        return 0;
}

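/* rxe_requester - the send-side work handler for a qp. Each call tries to
 * emit one request packet (or complete one local work request): pick the
 * next wqe, check fences and credit limits, choose the wire opcode, build
 * and transmit the packet, then advance the qp and wqe state. A zero
 * return asks the task loop to call it again; -EAGAIN means there is
 * nothing more to do for now.
 */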
int rxe_requester(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_pkt_info pkt;
        struct sk_buff *skb;
        struct rxe_send_wqe *wqe;
        enum rxe_hdr_mask mask;
        u32 payload;
        int mtu;
        int opcode;
        int err;
        int ret;
        struct rxe_send_wqe rollback_wqe;
        u32 rollback_psn;
        struct rxe_queue *q = qp->sq.queue;
        struct rxe_ah *ah;
        struct rxe_av *av;

        if (unlikely(!qp->valid))
                goto exit;

        if (unlikely(qp->req.state == QP_STATE_ERROR)) {
                wqe = req_next_wqe(qp);
                if (wqe)
                        /*
                         * Generate an error completion for error qp state
                         */
                        goto err;
                else
                        goto exit;
        }

        if (unlikely(qp->req.state == QP_STATE_RESET)) {
                qp->req.wqe_index = queue_get_consumer(q,
                                                QUEUE_TYPE_FROM_CLIENT);
                qp->req.opcode = -1;
                qp->req.need_rd_atomic = 0;
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                qp->req.wait_for_rnr_timer = 0;
                goto exit;
        }

        /* we come here if the retransmit timer has fired
         * or if the rnr timer has fired. If the retransmit
         * timer fires while we are processing an RNR NAK wait
         * until the rnr timer has fired before starting the
         * retry flow
         */
        if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
                req_retry(qp);
                qp->req.need_retry = 0;
        }

        wqe = req_next_wqe(qp);
        if (unlikely(!wqe))
                goto exit;

        if (rxe_wqe_is_fenced(qp, wqe)) {
                qp->req.wait_fence = 1;
                goto exit;
        }

        if (wqe->mask & WR_LOCAL_OP_MASK) {
                err = rxe_do_local_ops(qp, wqe);
                if (unlikely(err))
                        goto err;
                else
                        goto done;
        }

        if (unlikely(qp_type(qp) == IB_QPT_RC &&
                     psn_compare(qp->req.psn, (qp->comp.psn +
                                 RXE_MAX_UNACKED_PSNS)) > 0)) {
                qp->req.wait_psn = 1;
                goto exit;
        }

        /* Limit the number of inflight SKBs per QP */
        if (unlikely(atomic_read(&qp->skb_out) >
                     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
                qp->need_req_skb = 1;
                goto exit;
        }

        opcode = next_opcode(qp, wqe, wqe->wr.opcode);
        if (unlikely(opcode < 0)) {
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                goto err;
        }

        mask = rxe_opcode[opcode].mask;
        if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
                             RXE_ATOMIC_WRITE_MASK))) {
                if (check_init_depth(qp, wqe))
                        goto exit;
        }

        mtu = get_mtu(qp);
        payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
                        wqe->dma.resid : 0;
        if (payload > mtu) {
                if (qp_type(qp) == IB_QPT_UD) {
                        /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
                         * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
                         * shall not emit any packets for this message. Further, the CI shall not
                         * generate an error due to this condition.
                         */

                        /* fake a successful UD send */
                        wqe->first_psn = qp->req.psn;
                        wqe->last_psn = qp->req.psn;
                        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
                        qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
                        qp->req.wqe_index = queue_next_index(qp->sq.queue,
                                                             qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                        rxe_run_task(&qp->comp.task);
                        goto done;
                }
                payload = mtu;
        }

        pkt.rxe = rxe;
        pkt.opcode = opcode;
        pkt.qp = qp;
        pkt.psn = qp->req.psn;
        pkt.mask = rxe_opcode[opcode].mask;
        pkt.wqe = wqe;

        av = rxe_get_av(&pkt, &ah);
        if (unlikely(!av)) {
                rxe_dbg_qp(qp, "Failed no address vector\n");
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                goto err;
        }

        skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
        if (unlikely(!skb)) {
                rxe_dbg_qp(qp, "Failed allocating skb\n");
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                if (ah)
                        rxe_put(ah);
                goto err;
        }

        err = finish_packet(qp, av, wqe, &pkt, skb, payload);
        if (unlikely(err)) {
                rxe_dbg_qp(qp, "Error during finish packet\n");
                if (err == -EFAULT)
                        wqe->status = IB_WC_LOC_PROT_ERR;
                else
                        wqe->status = IB_WC_LOC_QP_OP_ERR;
                kfree_skb(skb);
                if (ah)
                        rxe_put(ah);
                goto err;
        }

        if (ah)
                rxe_put(ah);

        /*
         * To prevent a race on wqe access between requester and completer,
         * wqe members state and psn need to be set before calling
         * rxe_xmit_packet().
         * Otherwise, completer might initiate an unjustified retry flow.
         */
        save_state(wqe, qp, &rollback_wqe, &rollback_psn);
        update_wqe_state(qp, wqe, &pkt);
        update_wqe_psn(qp, wqe, &pkt, payload);

        err = rxe_xmit_packet(qp, &pkt, skb);
        if (err) {
                qp->need_req_skb = 1;

                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

                if (err == -EAGAIN) {
                        rxe_sched_task(&qp->req.task);
                        goto exit;
                }

                wqe->status = IB_WC_LOC_QP_OP_ERR;
                goto err;
        }

        update_state(qp, &pkt);

        /* A non-zero return value will cause rxe_do_task to
         * exit its loop and end the tasklet. A zero return
         * will continue looping and return to rxe_requester
         */
done:
        ret = 0;
        goto out;
err:
        /* update wqe_index for each wqe completion */
        qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
        wqe->state = wqe_state_error;
        qp->req.state = QP_STATE_ERROR;
        rxe_run_task(&qp->comp.task);