RDMA/rxe: use %u to print u32 variables

drivers/infiniband/sw/rxe/rxe_qp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %u > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %u > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %u > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %u > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %u > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        switch (init->qp_type) {
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

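/* qp->resp.resources is a ring (indexed by res_head/res_tail) in which the
 * responder caches per-request state for inbound RDMA Read and Atomic
 * operations, sized by max_dest_rd_atomic, so that duplicate requests can
 * be replayed
 */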
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

void free_rd_atomic_resource(struct resp_res *res)
{
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(res);
                }
        }
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type         = init->sq_sig_type;
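        /* a path_mtu of 1 is IB_MTU_256, the default until modify_qp */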
        qp->attr.path_mtu       = 1;
        qp->mtu                 = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn                     = qp->elem.index;
        port                    = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_GSI:
                qp->ibqp.qp_num         = 1;
                port->qp_gsi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        default:
                qp->ibqp.qp_num         = qpn;
                break;
        }

        spin_lock_init(&qp->state_lock);

        spin_lock_init(&qp->req.task.state_lock);
        spin_lock_init(&qp->resp.task.state_lock);
        spin_lock_init(&qp->comp.task.state_lock);

        spin_lock_init(&qp->sq.sq_lock);
        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
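        /* e.g. hash_32(qpn, 14) returns a value in [0, 0x3fff], so
         * qp->src_port always lands in 0xc000-0xffff
         */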
        qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
        qp->sq.max_wr           = init->cap.max_send_wr;

        /* These caps are limited by rxe_qp_chk_cap() done by the caller */
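        /* a send WQE reserves room for the larger of the SGE array and the
         * inline data area, so inline payloads reuse the SGE space; the
         * rounded-up limits are reported back through init->cap
         */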
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = init->cap.max_send_sge =
                wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);

        type = QUEUE_TYPE_FROM_CLIENT;
        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
                                wqe_size, type);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                qp->sq.queue = NULL;
                return err;
        }

        qp->req.wqe_index = queue_get_producer(qp->sq.queue,
                                               QUEUE_TYPE_FROM_CLIENT);

        qp->req.state           = QP_STATE_RESET;
        qp->comp.state          = QP_STATE_RESET;
        qp->req.opcode          = -1;
        qp->comp.opcode         = -1;

        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(&qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(&qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        if (!qp->srq) {
                qp->rq.max_wr           = init->cap.max_recv_wr;
                qp->rq.max_sge          = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                type = QUEUE_TYPE_FROM_CLIENT;
                qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
                                        wqe_size, type);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        qp->rq.queue = NULL;
                        return err;
                }
        }

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(&qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode         = OPCODE_NONE;
        qp->resp.msn            = 0;
        qp->resp.state          = QP_STATE_RESET;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

        rxe_get(pd);
        rxe_get(rcq);
        rxe_get(scq);
        if (srq)
                rxe_get(srq);

        qp->pd                  = pd;
        qp->rcq                 = rcq;
        qp->scq                 = scq;
        qp->srq                 = srq;

        atomic_inc(&rcq->num_wq);
        atomic_inc(&scq->num_wq);

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
        qp->sq.queue = NULL;
err1:
        atomic_dec(&rcq->num_wq);
        atomic_dec(&scq->num_wq);

        qp->pd = NULL;
        qp->rcq = NULL;
        qp->scq = NULL;
        qp->srq = NULL;

        if (srq)
                rxe_put(srq);
        rxe_put(scq);
        rxe_put(rcq);
        rxe_put(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler             = qp->ibqp.event_handler;
        init->qp_context                = qp->ibqp.qp_context;
        init->send_cq                   = qp->ibqp.send_cq;
        init->recv_cq                   = qp->ibqp.recv_cq;
        init->srq                       = qp->ibqp.srq;

        init->cap.max_send_wr           = qp->sq.max_wr;
        init->cap.max_send_sge          = qp->sq.max_sge;
        init->cap.max_inline_data       = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr           = qp->rq.max_wr;
                init->cap.max_recv_sge          = qp->rq.max_sge;
        }

        init->sq_sig_type               = qp->sq_sig_type;

        init->qp_type                   = qp->ibqp.qp_type;
        init->port_num                  = 1;

        return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                        attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                        attr->qp_state : cur_state;

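        /* ib_modify_qp_is_ok() checks the requested state transition and the
         * attribute mask against the required/allowed tables of the IBA spec
         * for this QP type
         */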
        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->comp.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let the state machines reset themselves, draining work and
         * packet queues, etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.wait_for_rnr_timer = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
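/* kicking the req/comp tasks lets in-flight WQEs complete; the requester
 * task is expected to advance QP_STATE_DRAIN to QP_STATE_DRAINED once the
 * send queue is empty (see rxe_requester())
 */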
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->comp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;
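                /* e.g. a requested max_rd_atomic of 5 is rounded up to 8 */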

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
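                        /* e.g. attr->timeout = 14 gives 4096 ns << 14,
                         * about 67.1 ms, before conversion to jiffies
                         */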
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
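                /* PSNs are 24 bits on the wire; BTH_PSN_MASK keeps the
                 * low 24 bits
                 */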
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        qp->comp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        qp->comp.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn                            = qp->resp.psn;
        attr->sq_psn                            = qp->req.psn;

        attr->cap.max_send_wr                   = qp->sq.max_wr;
        attr->cap.max_send_sge                  = qp->sq.max_sge;
        attr->cap.max_inline_data               = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr           = qp->rq.max_wr;
                attr->cap.max_recv_sge          = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
        /* See IBA o10-2.2.3
         * An attempt to destroy a QP while attached to a mcast group
         * will fail immediately.
         */
        if (atomic_read(&qp->mcg_num)) {
                pr_debug("Attempt to destroy QP while attached to multicast group\n");
                return -EBUSY;
        }

        return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        if (qp->req.task.func)
                __rxe_do_task(&qp->req.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_put(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq) {
                atomic_dec(&qp->scq->num_wq);
                rxe_put(qp->scq);
        }

        if (qp->rcq) {
                atomic_dec(&qp->rcq->num_wq);
                rxe_put(qp->rcq);
        }

        if (qp->pd)
                rxe_put(qp->pd);

        if (qp->resp.mr)
                rxe_put(qp->resp.mr);

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        if (qp->sk) {
                kernel_sock_shutdown(qp->sk, SHUT_RDWR);
                sock_release(qp->sk);
        }
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
        struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

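        /* execute_in_process_context() runs rxe_qp_do_cleanup() immediately
         * when called from process context, otherwise it defers the cleanup
         * to a work item
         */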
        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}