// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
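
/* Check that the queue sizes requested in ib_qp_cap fit within the device
 * limits reported in rxe->attr; used by both the create and modify QP paths.
 */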
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %u > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		return -EINVAL;
	}
	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %u > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		return -EINVAL;
	}
	/* the recv queue limits only apply when the QP owns its own RQ */
	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %u > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			return -EINVAL;
		}
		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %u > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			return -EINVAL;
		}
	}
	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %u > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		return -EINVAL;
	}

	return 0;
}
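
/* Validate the ib_qp_init_attr passed to the create QP verb: the QP type
 * must be one rxe supports, both CQs must be present, the capabilities must
 * pass rxe_qp_chk_cap(), and at most one GSI QP may exist per port.
 */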
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		return -EINVAL;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		return -EINVAL;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			return -EINVAL;
		}

		port = &rxe->port;
		if (port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			return -EINVAL;
		}
	}

	return 0;
}
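
/* The responder keeps an array of resp_res entries, one per inbound RDMA
 * read or atomic operation it may have to replay; it is sized from
 * max_dest_rd_atomic in rxe_qp_from_attr() and released with the QP.
 */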
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}
void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(res);
		}
	}
}
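
/* Fill in the fields common to every QP type: signalling mode, default
 * path MTU, the QP number (the GSI QP is always QP 1), and the locks and
 * counters shared by the requester, completer and responder.
 */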
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->elem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;
	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->req.task.state_lock);
	spin_lock_init(&qp->resp.task.state_lock);
	spin_lock_init(&qp->comp.task.state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}
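
/* Set up the send side of the QP: the kernel UDP socket used to transmit
 * packets, the source UDP port derived from the QPN, the send work queue,
 * and the requester/completer tasks and their timers.
 */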
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);
	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(&qp->req.task, qp, rxe_requester, "req");
	rxe_init_task(&qp->comp.task, qp, rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}

	return 0;
}
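
/* Set up the receive side of the QP: the receive work queue (skipped when
 * the QP uses a shared receive queue) and the responder task.
 */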
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					      wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(&qp->resp.task, qp, rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}
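
/* QP creation is done in three steps: rxe_qp_init_misc() for fields common
 * to all QP types, rxe_qp_init_req() for the send side, and
 * rxe_qp_init_resp() for the receive side; any failure unwinds the work
 * already done before the error is returned to the verbs layer.
 */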
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}
/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;

	return 0;
}
/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		return -EINVAL;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				return -EINVAL;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			return -EINVAL;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		return -EINVAL;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		return -EINVAL;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			return -EINVAL;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			return -EINVAL;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			return -EINVAL;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;
		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			return -EINVAL;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			return -EINVAL;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			return -EINVAL;
		}
	}

	return 0;
}
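
/* The helpers below implement the RESET, SQD (drain) and ERROR transitions
 * requested through modify QP; they quiesce or kick the requester,
 * completer and responder tasks so the new state is observed by all three.
 */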
/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves and drain their work
	 * and packet queues
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);
		rxe_enable_task(&qp->req.task);
	}
}
/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->comp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
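			/* e.g. attr->timeout == 14 gives 4096 ns << 14, roughly 67 ms */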
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			qp->comp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			qp->comp.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}
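
/* Check whether the QP may be destroyed now; the IBA forbids destroying a
 * QP that is still attached to a multicast group.
 */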
int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/*
	 * An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		pr_debug("Attempt to destroy QP while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	if (qp->req.task.func)
		__rxe_do_task(&qp->req.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}