drivers/nvme/host/tcp.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP host.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/key.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
14 #include <net/sock.h>
15 #include <net/tcp.h>
16 #include <net/tls.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/blk-mq.h>
20 #include <crypto/hash.h>
21 #include <net/busy_poll.h>
22 #include <trace/events/sock.h>
23
24 #include "nvme.h"
25 #include "fabrics.h"
26
27 struct nvme_tcp_queue;
28
29 /* Define the socket priority to use for connections where it is desirable
30  * that the NIC consider performing optimized packet processing or filtering.
31  * A non-zero value is sufficient to indicate general consideration of any
32  * possible optimization.  Making it a module param allows for alternative
33  * values that may be unique for some NIC implementations.
34  */
35 static int so_priority;
36 module_param(so_priority, int, 0644);
37 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
38
39 /*
40  * TLS handshake timeout
41  */
42 static int tls_handshake_timeout = 10;
43 #ifdef CONFIG_NVME_TCP_TLS
44 module_param(tls_handshake_timeout, int, 0644);
45 MODULE_PARM_DESC(tls_handshake_timeout,
46                  "nvme TLS handshake timeout in seconds (default 10)");
47 #endif
48
49 #ifdef CONFIG_DEBUG_LOCK_ALLOC
50 /* lockdep can detect a circular dependency of the form
51  *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
52  * because dependencies are tracked for both nvme-tcp and user contexts. Using
53  * a separate class prevents lockdep from conflating nvme-tcp socket use with
54  * user-space socket API use.
55  */
56 static struct lock_class_key nvme_tcp_sk_key[2];
57 static struct lock_class_key nvme_tcp_slock_key[2];
58
59 static void nvme_tcp_reclassify_socket(struct socket *sock)
60 {
61         struct sock *sk = sock->sk;
62
63         if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
64                 return;
65
66         switch (sk->sk_family) {
67         case AF_INET:
68                 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
69                                               &nvme_tcp_slock_key[0],
70                                               "sk_lock-AF_INET-NVME",
71                                               &nvme_tcp_sk_key[0]);
72                 break;
73         case AF_INET6:
74                 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
75                                               &nvme_tcp_slock_key[1],
76                                               "sk_lock-AF_INET6-NVME",
77                                               &nvme_tcp_sk_key[1]);
78                 break;
79         default:
80                 WARN_ON_ONCE(1);
81         }
82 }
83 #else
84 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
85 #endif
86
87 enum nvme_tcp_send_state {
88         NVME_TCP_SEND_CMD_PDU = 0,
89         NVME_TCP_SEND_H2C_PDU,
90         NVME_TCP_SEND_DATA,
91         NVME_TCP_SEND_DDGST,
92 };
93
94 struct nvme_tcp_request {
95         struct nvme_request     req;
96         void                    *pdu;
97         struct nvme_tcp_queue   *queue;
98         u32                     data_len;
99         u32                     pdu_len;
100         u32                     pdu_sent;
101         u32                     h2cdata_left;
102         u32                     h2cdata_offset;
103         u16                     ttag;
104         __le16                  status;
105         struct list_head        entry;
106         struct llist_node       lentry;
107         __le32                  ddgst;
108
109         struct bio              *curr_bio;
110         struct iov_iter         iter;
111
112         /* send state */
113         size_t                  offset;
114         size_t                  data_sent;
115         enum nvme_tcp_send_state state;
116 };
117
118 enum nvme_tcp_queue_flags {
119         NVME_TCP_Q_ALLOCATED    = 0,
120         NVME_TCP_Q_LIVE         = 1,
121         NVME_TCP_Q_POLLING      = 2,
122 };
123
124 enum nvme_tcp_recv_state {
125         NVME_TCP_RECV_PDU = 0,
126         NVME_TCP_RECV_DATA,
127         NVME_TCP_RECV_DDGST,
128 };
129
130 struct nvme_tcp_ctrl;
131 struct nvme_tcp_queue {
132         struct socket           *sock;
133         struct work_struct      io_work;
134         int                     io_cpu;
135
136         struct mutex            queue_lock;
137         struct mutex            send_mutex;
138         struct llist_head       req_list;
139         struct list_head        send_list;
140
141         /* recv state */
142         void                    *pdu;
143         int                     pdu_remaining;
144         int                     pdu_offset;
145         size_t                  data_remaining;
146         size_t                  ddgst_remaining;
147         unsigned int            nr_cqe;
148
149         /* send state */
150         struct nvme_tcp_request *request;
151
152         u32                     maxh2cdata;
153         size_t                  cmnd_capsule_len;
154         struct nvme_tcp_ctrl    *ctrl;
155         unsigned long           flags;
156         bool                    rd_enabled;
157
158         bool                    hdr_digest;
159         bool                    data_digest;
160         struct ahash_request    *rcv_hash;
161         struct ahash_request    *snd_hash;
162         __le32                  exp_ddgst;
163         __le32                  recv_ddgst;
164         struct completion       tls_complete;
165         int                     tls_err;
166         struct page_frag_cache  pf_cache;
167
168         void (*state_change)(struct sock *);
169         void (*data_ready)(struct sock *);
170         void (*write_space)(struct sock *);
171 };
172
173 struct nvme_tcp_ctrl {
174         /* read only in the hot path */
175         struct nvme_tcp_queue   *queues;
176         struct blk_mq_tag_set   tag_set;
177
178         /* other member variables */
179         struct list_head        list;
180         struct blk_mq_tag_set   admin_tag_set;
181         struct sockaddr_storage addr;
182         struct sockaddr_storage src_addr;
183         struct nvme_ctrl        ctrl;
184
185         struct work_struct      err_work;
186         struct delayed_work     connect_work;
187         struct nvme_tcp_request async_req;
188         u32                     io_queues[HCTX_MAX_TYPES];
189 };
190
191 static LIST_HEAD(nvme_tcp_ctrl_list);
192 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
193 static struct workqueue_struct *nvme_tcp_wq;
194 static const struct blk_mq_ops nvme_tcp_mq_ops;
195 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
196 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
197
198 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
199 {
200         return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
201 }
202
203 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
204 {
205         return queue - queue->ctrl->queues;
206 }
207
208 static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
209 {
210         if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
211                 return false;
212
213         return ctrl->opts->tls;
214 }
215
216 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
217 {
218         u32 queue_idx = nvme_tcp_queue_id(queue);
219
220         if (queue_idx == 0)
221                 return queue->ctrl->admin_tag_set.tags[queue_idx];
222         return queue->ctrl->tag_set.tags[queue_idx - 1];
223 }
224
225 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
226 {
227         return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
228 }
229
230 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
231 {
232         return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
233 }
234
235 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
236 {
237         return req->pdu;
238 }
239
240 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
241 {
242         /* use the pdu space in the back for the data pdu */
243         return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
244                 sizeof(struct nvme_tcp_data_pdu);
245 }
246
247 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
248 {
249         if (nvme_is_fabrics(req->req.cmd))
250                 return NVME_TCP_ADMIN_CCSZ;
251         return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
252 }
253
254 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
255 {
256         return req == &req->queue->ctrl->async_req;
257 }
258
259 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
260 {
261         struct request *rq;
262
263         if (unlikely(nvme_tcp_async_req(req)))
264                 return false; /* async events don't have a request */
265
266         rq = blk_mq_rq_from_pdu(req);
267
268         return rq_data_dir(rq) == WRITE && req->data_len &&
269                 req->data_len <= nvme_tcp_inline_data_size(req);
270 }
271
272 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
273 {
274         return req->iter.bvec->bv_page;
275 }
276
277 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
278 {
279         return req->iter.bvec->bv_offset + req->iter.iov_offset;
280 }
281
282 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
283 {
284         return min_t(size_t, iov_iter_single_seg_count(&req->iter),
285                         req->pdu_len - req->pdu_sent);
286 }
287
288 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
289 {
290         return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
291                         req->pdu_len - req->pdu_sent : 0;
292 }
293
294 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
295                 int len)
296 {
297         return nvme_tcp_pdu_data_left(req) <= len;
298 }
299
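/*
 * Set up req->iter over the data pages of the current bio (or over the
 * special payload vector for requests with RQF_SPECIAL_PAYLOAD) so the
 * send and receive paths can walk the payload as a bvec iterator;
 * 'dir' is ITER_SOURCE for writes and ITER_DEST for reads.
 */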
300 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
301                 unsigned int dir)
302 {
303         struct request *rq = blk_mq_rq_from_pdu(req);
304         struct bio_vec *vec;
305         unsigned int size;
306         int nr_bvec;
307         size_t offset;
308
309         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
310                 vec = &rq->special_vec;
311                 nr_bvec = 1;
312                 size = blk_rq_payload_bytes(rq);
313                 offset = 0;
314         } else {
315                 struct bio *bio = req->curr_bio;
316                 struct bvec_iter bi;
317                 struct bio_vec bv;
318
319                 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
320                 nr_bvec = 0;
321                 bio_for_each_bvec(bv, bio, bi) {
322                         nr_bvec++;
323                 }
324                 size = bio->bi_iter.bi_size;
325                 offset = bio->bi_iter.bi_bvec_done;
326         }
327
328         iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
329         req->iter.iov_offset = offset;
330 }
331
332 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
333                 int len)
334 {
335         req->data_sent += len;
336         req->pdu_sent += len;
337         iov_iter_advance(&req->iter, len);
338         if (!iov_iter_count(&req->iter) &&
339             req->data_sent < req->data_len) {
340                 req->curr_bio = req->curr_bio->bi_next;
341                 nvme_tcp_init_iter(req, ITER_SOURCE);
342         }
343 }
344
345 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
346 {
347         int ret;
348
349         /* drain the send queue as much as we can... */
350         do {
351                 ret = nvme_tcp_try_send(queue);
352         } while (ret > 0);
353 }
354
355 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
356 {
357         return !list_empty(&queue->send_list) ||
358                 !llist_empty(&queue->req_list);
359 }
360
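/*
 * Queue a request for transmission.  Requests are first added to the
 * lockless req_list; 'sync' allows an inline send attempt from the
 * submitting context when the queue is otherwise idle, and 'last'
 * marks the end of a batch so io_work is only kicked once per batch.
 */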
361 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
362                 bool sync, bool last)
363 {
364         struct nvme_tcp_queue *queue = req->queue;
365         bool empty;
366
367         empty = llist_add(&req->lentry, &queue->req_list) &&
368                 list_empty(&queue->send_list) && !queue->request;
369
370         /*
371          * If we're the first on the send_list, try to send directly;
372          * otherwise queue io_work. Also, only do that if we are on the
373          * same cpu, so we don't introduce contention.
374          */
375         if (queue->io_cpu == raw_smp_processor_id() &&
376             sync && empty && mutex_trylock(&queue->send_mutex)) {
377                 nvme_tcp_send_all(queue);
378                 mutex_unlock(&queue->send_mutex);
379         }
380
381         if (last && nvme_tcp_queue_more(queue))
382                 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
383 }
384
385 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
386 {
387         struct nvme_tcp_request *req;
388         struct llist_node *node;
389
390         for (node = llist_del_all(&queue->req_list); node; node = node->next) {
391                 req = llist_entry(node, struct nvme_tcp_request, lentry);
392                 list_add(&req->entry, &queue->send_list);
393         }
394 }
395
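/*
 * Fetch the next request to transmit: when send_list is empty, first
 * drain the lockless req_list into it, then pop the head of send_list.
 */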
396 static inline struct nvme_tcp_request *
397 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
398 {
399         struct nvme_tcp_request *req;
400
401         req = list_first_entry_or_null(&queue->send_list,
402                         struct nvme_tcp_request, entry);
403         if (!req) {
404                 nvme_tcp_process_req_list(queue);
405                 req = list_first_entry_or_null(&queue->send_list,
406                                 struct nvme_tcp_request, entry);
407                 if (unlikely(!req))
408                         return NULL;
409         }
410
411         list_del(&req->entry);
412         return req;
413 }
414
415 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
416                 __le32 *dgst)
417 {
418         ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
419         crypto_ahash_final(hash);
420 }
421
422 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
423                 struct page *page, off_t off, size_t len)
424 {
425         struct scatterlist sg;
426
427         sg_init_table(&sg, 1);
428         sg_set_page(&sg, page, len, off);
429         ahash_request_set_crypt(hash, &sg, NULL, len);
430         crypto_ahash_update(hash);
431 }
432
433 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
434                 void *pdu, size_t len)
435 {
436         struct scatterlist sg;
437
438         sg_init_one(&sg, pdu, len);
439         ahash_request_set_crypt(hash, &sg, pdu + len, len);
440         crypto_ahash_digest(hash);
441 }
442
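/*
 * Verify the received header digest: save the digest that follows the
 * PDU header, recompute it over the header in place and compare the
 * two values.
 */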
443 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
444                 void *pdu, size_t pdu_len)
445 {
446         struct nvme_tcp_hdr *hdr = pdu;
447         __le32 recv_digest;
448         __le32 exp_digest;
449
450         if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
451                 dev_err(queue->ctrl->ctrl.device,
452                         "queue %d: header digest flag is cleared\n",
453                         nvme_tcp_queue_id(queue));
454                 return -EPROTO;
455         }
456
457         recv_digest = *(__le32 *)(pdu + hdr->hlen);
458         nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
459         exp_digest = *(__le32 *)(pdu + hdr->hlen);
460         if (recv_digest != exp_digest) {
461                 dev_err(queue->ctrl->ctrl.device,
462                         "header digest error: recv %#x expected %#x\n",
463                         le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
464                 return -EIO;
465         }
466
467         return 0;
468 }
469
470 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
471 {
472         struct nvme_tcp_hdr *hdr = pdu;
473         u8 digest_len = nvme_tcp_hdgst_len(queue);
474         u32 len;
475
476         len = le32_to_cpu(hdr->plen) - hdr->hlen -
477                 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
478
479         if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
480                 dev_err(queue->ctrl->ctrl.device,
481                         "queue %d: data digest flag is cleared\n",
482                         nvme_tcp_queue_id(queue));
483                 return -EPROTO;
484         }
485         crypto_ahash_init(queue->rcv_hash);
486
487         return 0;
488 }
489
490 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
491                 struct request *rq, unsigned int hctx_idx)
492 {
493         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
494
495         page_frag_free(req->pdu);
496 }
497
498 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
499                 struct request *rq, unsigned int hctx_idx,
500                 unsigned int numa_node)
501 {
502         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
503         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
504         struct nvme_tcp_cmd_pdu *pdu;
505         int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
506         struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
507         u8 hdgst = nvme_tcp_hdgst_len(queue);
508
509         req->pdu = page_frag_alloc(&queue->pf_cache,
510                 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
511                 GFP_KERNEL | __GFP_ZERO);
512         if (!req->pdu)
513                 return -ENOMEM;
514
515         pdu = req->pdu;
516         req->queue = queue;
517         nvme_req(rq)->ctrl = &ctrl->ctrl;
518         nvme_req(rq)->cmd = &pdu->cmd;
519
520         return 0;
521 }
522
523 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
524                 unsigned int hctx_idx)
525 {
526         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
527         struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
528
529         hctx->driver_data = queue;
530         return 0;
531 }
532
533 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
534                 unsigned int hctx_idx)
535 {
536         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
537         struct nvme_tcp_queue *queue = &ctrl->queues[0];
538
539         hctx->driver_data = queue;
540         return 0;
541 }
542
543 static enum nvme_tcp_recv_state
544 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
545 {
546         return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
547                 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
548                 NVME_TCP_RECV_DATA;
549 }
550
551 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
552 {
553         queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
554                                 nvme_tcp_hdgst_len(queue);
555         queue->pdu_offset = 0;
556         queue->data_remaining = -1;
557         queue->ddgst_remaining = 0;
558 }
559
560 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
561 {
562         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
563                 return;
564
565         dev_warn(ctrl->device, "starting error recovery\n");
566         queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
567 }
568
569 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
570                 struct nvme_completion *cqe)
571 {
572         struct nvme_tcp_request *req;
573         struct request *rq;
574
575         rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
576         if (!rq) {
577                 dev_err(queue->ctrl->ctrl.device,
578                         "got bad cqe.command_id %#x on queue %d\n",
579                         cqe->command_id, nvme_tcp_queue_id(queue));
580                 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
581                 return -EINVAL;
582         }
583
584         req = blk_mq_rq_to_pdu(rq);
585         if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
586                 req->status = cqe->status;
587
588         if (!nvme_try_complete_req(rq, req->status, cqe->result))
589                 nvme_complete_rq(rq);
590         queue->nr_cqe++;
591
592         return 0;
593 }
594
595 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
596                 struct nvme_tcp_data_pdu *pdu)
597 {
598         struct request *rq;
599
600         rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
601         if (!rq) {
602                 dev_err(queue->ctrl->ctrl.device,
603                         "got bad c2hdata.command_id %#x on queue %d\n",
604                         pdu->command_id, nvme_tcp_queue_id(queue));
605                 return -ENOENT;
606         }
607
608         if (!blk_rq_payload_bytes(rq)) {
609                 dev_err(queue->ctrl->ctrl.device,
610                         "queue %d tag %#x unexpected data\n",
611                         nvme_tcp_queue_id(queue), rq->tag);
612                 return -EIO;
613         }
614
615         queue->data_remaining = le32_to_cpu(pdu->data_length);
616
617         if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
618             unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
619                 dev_err(queue->ctrl->ctrl.device,
620                         "queue %d tag %#x SUCCESS set but not last PDU\n",
621                         nvme_tcp_queue_id(queue), rq->tag);
622                 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
623                 return -EPROTO;
624         }
625
626         return 0;
627 }
628
629 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
630                 struct nvme_tcp_rsp_pdu *pdu)
631 {
632         struct nvme_completion *cqe = &pdu->cqe;
633         int ret = 0;
634
635         /*
636          * AEN requests are special as they don't time out and can
637          * survive any kind of queue freeze and often don't respond to
638          * aborts.  We don't even bother to allocate a struct request
639          * for them but rather special case them here.
640          */
641         if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
642                                      cqe->command_id)))
643                 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
644                                 &cqe->result);
645         else
646                 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
647
648         return ret;
649 }
650
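/*
 * Prepare the next H2CData PDU for an outstanding R2T: clamp the chunk
 * to the controller's MAXH2CDATA limit, advance the data offset by
 * what was already sent, and set NVME_TCP_F_DATA_LAST on the final
 * chunk.
 */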
651 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
652 {
653         struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
654         struct nvme_tcp_queue *queue = req->queue;
655         struct request *rq = blk_mq_rq_from_pdu(req);
656         u32 h2cdata_sent = req->pdu_len;
657         u8 hdgst = nvme_tcp_hdgst_len(queue);
658         u8 ddgst = nvme_tcp_ddgst_len(queue);
659
660         req->state = NVME_TCP_SEND_H2C_PDU;
661         req->offset = 0;
662         req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
663         req->pdu_sent = 0;
664         req->h2cdata_left -= req->pdu_len;
665         req->h2cdata_offset += h2cdata_sent;
666
667         memset(data, 0, sizeof(*data));
668         data->hdr.type = nvme_tcp_h2c_data;
669         if (!req->h2cdata_left)
670                 data->hdr.flags = NVME_TCP_F_DATA_LAST;
671         if (queue->hdr_digest)
672                 data->hdr.flags |= NVME_TCP_F_HDGST;
673         if (queue->data_digest)
674                 data->hdr.flags |= NVME_TCP_F_DDGST;
675         data->hdr.hlen = sizeof(*data);
676         data->hdr.pdo = data->hdr.hlen + hdgst;
677         data->hdr.plen =
678                 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
679         data->ttag = req->ttag;
680         data->command_id = nvme_cid(rq);
681         data->data_offset = cpu_to_le32(req->h2cdata_offset);
682         data->data_length = cpu_to_le32(req->pdu_len);
683 }
684
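/*
 * Handle an R2T PDU: look up the request, sanity check the requested
 * transfer length and offset against what has already been sent, then
 * queue the corresponding H2CData transfer.
 */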
685 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
686                 struct nvme_tcp_r2t_pdu *pdu)
687 {
688         struct nvme_tcp_request *req;
689         struct request *rq;
690         u32 r2t_length = le32_to_cpu(pdu->r2t_length);
691         u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
692
693         rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
694         if (!rq) {
695                 dev_err(queue->ctrl->ctrl.device,
696                         "got bad r2t.command_id %#x on queue %d\n",
697                         pdu->command_id, nvme_tcp_queue_id(queue));
698                 return -ENOENT;
699         }
700         req = blk_mq_rq_to_pdu(rq);
701
702         if (unlikely(!r2t_length)) {
703                 dev_err(queue->ctrl->ctrl.device,
704                         "req %d r2t len is %u, probably a bug...\n",
705                         rq->tag, r2t_length);
706                 return -EPROTO;
707         }
708
709         if (unlikely(req->data_sent + r2t_length > req->data_len)) {
710                 dev_err(queue->ctrl->ctrl.device,
711                         "req %d r2t len %u exceeded data len %u (%zu sent)\n",
712                         rq->tag, r2t_length, req->data_len, req->data_sent);
713                 return -EPROTO;
714         }
715
716         if (unlikely(r2t_offset < req->data_sent)) {
717                 dev_err(queue->ctrl->ctrl.device,
718                         "req %d unexpected r2t offset %u (expected %zu)\n",
719                         rq->tag, r2t_offset, req->data_sent);
720                 return -EPROTO;
721         }
722
723         req->pdu_len = 0;
724         req->h2cdata_left = r2t_length;
725         req->h2cdata_offset = r2t_offset;
726         req->ttag = pdu->ttag;
727
728         nvme_tcp_setup_h2c_data_pdu(req);
729         nvme_tcp_queue_request(req, false, true);
730
731         return 0;
732 }
733
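/*
 * Reassemble a PDU header that may arrive split across multiple skbs.
 * Once the header (plus header digest, if any) is complete, verify the
 * digests and dispatch on the PDU type.
 */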
734 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
735                 unsigned int *offset, size_t *len)
736 {
737         struct nvme_tcp_hdr *hdr;
738         char *pdu = queue->pdu;
739         size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
740         int ret;
741
742         ret = skb_copy_bits(skb, *offset,
743                 &pdu[queue->pdu_offset], rcv_len);
744         if (unlikely(ret))
745                 return ret;
746
747         queue->pdu_remaining -= rcv_len;
748         queue->pdu_offset += rcv_len;
749         *offset += rcv_len;
750         *len -= rcv_len;
751         if (queue->pdu_remaining)
752                 return 0;
753
754         hdr = queue->pdu;
755         if (queue->hdr_digest) {
756                 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
757                 if (unlikely(ret))
758                         return ret;
759         }
760
761
762         if (queue->data_digest) {
763                 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
764                 if (unlikely(ret))
765                         return ret;
766         }
767
768         switch (hdr->type) {
769         case nvme_tcp_c2h_data:
770                 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
771         case nvme_tcp_rsp:
772                 nvme_tcp_init_recv_ctx(queue);
773                 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
774         case nvme_tcp_r2t:
775                 nvme_tcp_init_recv_ctx(queue);
776                 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
777         default:
778                 dev_err(queue->ctrl->ctrl.device,
779                         "unsupported pdu type (%d)\n", hdr->type);
780                 return -EINVAL;
781         }
782 }
783
784 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
785 {
786         union nvme_result res = {};
787
788         if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
789                 nvme_complete_rq(rq);
790 }
791
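/*
 * Copy C2HData payload from the skb into the request's bio iterator,
 * updating the running data digest when enabled.  Once the PDU payload
 * is consumed, either start receiving the trailing data digest or
 * complete the request if the controller indicated SUCCESS.
 */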
792 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
793                               unsigned int *offset, size_t *len)
794 {
795         struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
796         struct request *rq =
797                 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
798         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
799
800         while (true) {
801                 int recv_len, ret;
802
803                 recv_len = min_t(size_t, *len, queue->data_remaining);
804                 if (!recv_len)
805                         break;
806
807                 if (!iov_iter_count(&req->iter)) {
808                         req->curr_bio = req->curr_bio->bi_next;
809
810                         /*
811                          * If we don't have any bios left it means that the
812                          * controller sent more data than we requested, hence error
813                          */
814                         if (!req->curr_bio) {
815                                 dev_err(queue->ctrl->ctrl.device,
816                                         "queue %d no space in request %#x\n",
817                                         nvme_tcp_queue_id(queue), rq->tag);
818                                 nvme_tcp_init_recv_ctx(queue);
819                                 return -EIO;
820                         }
821                         nvme_tcp_init_iter(req, ITER_DEST);
822                 }
823
824                 /* we can read only from what is left in this bio */
825                 recv_len = min_t(size_t, recv_len,
826                                 iov_iter_count(&req->iter));
827
828                 if (queue->data_digest)
829                         ret = skb_copy_and_hash_datagram_iter(skb, *offset,
830                                 &req->iter, recv_len, queue->rcv_hash);
831                 else
832                         ret = skb_copy_datagram_iter(skb, *offset,
833                                         &req->iter, recv_len);
834                 if (ret) {
835                         dev_err(queue->ctrl->ctrl.device,
836                                 "queue %d failed to copy request %#x data\n",
837                                 nvme_tcp_queue_id(queue), rq->tag);
838                         return ret;
839                 }
840
841                 *len -= recv_len;
842                 *offset += recv_len;
843                 queue->data_remaining -= recv_len;
844         }
845
846         if (!queue->data_remaining) {
847                 if (queue->data_digest) {
848                         nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
849                         queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
850                 } else {
851                         if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
852                                 nvme_tcp_end_request(rq,
853                                                 le16_to_cpu(req->status));
854                                 queue->nr_cqe++;
855                         }
856                         nvme_tcp_init_recv_ctx(queue);
857                 }
858         }
859
860         return 0;
861 }
862
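/*
 * Receive the data digest trailing a C2HData PDU and compare it with
 * the digest computed while receiving the payload; on mismatch the
 * request is flagged with a data transfer error.
 */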
863 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
864                 struct sk_buff *skb, unsigned int *offset, size_t *len)
865 {
866         struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
867         char *ddgst = (char *)&queue->recv_ddgst;
868         size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
869         off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
870         int ret;
871
872         ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
873         if (unlikely(ret))
874                 return ret;
875
876         queue->ddgst_remaining -= recv_len;
877         *offset += recv_len;
878         *len -= recv_len;
879         if (queue->ddgst_remaining)
880                 return 0;
881
882         if (queue->recv_ddgst != queue->exp_ddgst) {
883                 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
884                                         pdu->command_id);
885                 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
886
887                 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
888
889                 dev_err(queue->ctrl->ctrl.device,
890                         "data digest error: recv %#x expected %#x\n",
891                         le32_to_cpu(queue->recv_ddgst),
892                         le32_to_cpu(queue->exp_ddgst));
893         }
894
895         if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
896                 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
897                                         pdu->command_id);
898                 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
899
900                 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
901                 queue->nr_cqe++;
902         }
903
904         nvme_tcp_init_recv_ctx(queue);
905         return 0;
906 }
907
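/*
 * ->read_sock() callback: consume the skb by running the receive state
 * machine (PDU header, data, data digest) and start error recovery on
 * any failure.
 */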
908 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
909                              unsigned int offset, size_t len)
910 {
911         struct nvme_tcp_queue *queue = desc->arg.data;
912         size_t consumed = len;
913         int result;
914
915         if (unlikely(!queue->rd_enabled))
916                 return -EFAULT;
917
918         while (len) {
919                 switch (nvme_tcp_recv_state(queue)) {
920                 case NVME_TCP_RECV_PDU:
921                         result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
922                         break;
923                 case NVME_TCP_RECV_DATA:
924                         result = nvme_tcp_recv_data(queue, skb, &offset, &len);
925                         break;
926                 case NVME_TCP_RECV_DDGST:
927                         result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
928                         break;
929                 default:
930                         result = -EFAULT;
931                 }
932                 if (result) {
933                         dev_err(queue->ctrl->ctrl.device,
934                                 "receive failed: %d\n", result);
935                         queue->rd_enabled = false;
936                         nvme_tcp_error_recovery(&queue->ctrl->ctrl);
937                         return result;
938                 }
939         }
940
941         return consumed;
942 }
943
944 static void nvme_tcp_data_ready(struct sock *sk)
945 {
946         struct nvme_tcp_queue *queue;
947
948         trace_sk_data_ready(sk);
949
950         read_lock_bh(&sk->sk_callback_lock);
951         queue = sk->sk_user_data;
952         if (likely(queue && queue->rd_enabled) &&
953             !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
954                 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
955         read_unlock_bh(&sk->sk_callback_lock);
956 }
957
958 static void nvme_tcp_write_space(struct sock *sk)
959 {
960         struct nvme_tcp_queue *queue;
961
962         read_lock_bh(&sk->sk_callback_lock);
963         queue = sk->sk_user_data;
964         if (likely(queue && sk_stream_is_writeable(sk))) {
965                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
966                 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
967         }
968         read_unlock_bh(&sk->sk_callback_lock);
969 }
970
971 static void nvme_tcp_state_change(struct sock *sk)
972 {
973         struct nvme_tcp_queue *queue;
974
975         read_lock_bh(&sk->sk_callback_lock);
976         queue = sk->sk_user_data;
977         if (!queue)
978                 goto done;
979
980         switch (sk->sk_state) {
981         case TCP_CLOSE:
982         case TCP_CLOSE_WAIT:
983         case TCP_LAST_ACK:
984         case TCP_FIN_WAIT1:
985         case TCP_FIN_WAIT2:
986                 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
987                 break;
988         default:
989                 dev_info(queue->ctrl->ctrl.device,
990                         "queue %d socket state %d\n",
991                         nvme_tcp_queue_id(queue), sk->sk_state);
992         }
993
994         queue->state_change(sk);
995 done:
996         read_unlock_bh(&sk->sk_callback_lock);
997 }
998
999 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
1000 {
1001         queue->request = NULL;
1002 }
1003
1004 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
1005 {
1006         if (nvme_tcp_async_req(req)) {
1007                 union nvme_result res = {};
1008
1009                 nvme_complete_async_event(&req->queue->ctrl->ctrl,
1010                                 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
1011         } else {
1012                 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
1013                                 NVME_SC_HOST_PATH_ERROR);
1014         }
1015 }
1016
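/*
 * Send the payload of the current PDU page by page, using zero-copy
 * MSG_SPLICE_PAGES where the page allows it and updating the data
 * digest as we go.
 */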
1017 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
1018 {
1019         struct nvme_tcp_queue *queue = req->queue;
1020         int req_data_len = req->data_len;
1021         u32 h2cdata_left = req->h2cdata_left;
1022
1023         while (true) {
1024                 struct bio_vec bvec;
1025                 struct msghdr msg = {
1026                         .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
1027                 };
1028                 struct page *page = nvme_tcp_req_cur_page(req);
1029                 size_t offset = nvme_tcp_req_cur_offset(req);
1030                 size_t len = nvme_tcp_req_cur_length(req);
1031                 bool last = nvme_tcp_pdu_last_send(req, len);
1032                 int req_data_sent = req->data_sent;
1033                 int ret;
1034
1035                 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1036                         msg.msg_flags |= MSG_EOR;
1037                 else
1038                         msg.msg_flags |= MSG_MORE;
1039
1040                 if (!sendpage_ok(page))
1041                         msg.msg_flags &= ~MSG_SPLICE_PAGES;
1042
1043                 bvec_set_page(&bvec, page, len, offset);
1044                 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1045                 ret = sock_sendmsg(queue->sock, &msg);
1046                 if (ret <= 0)
1047                         return ret;
1048
1049                 if (queue->data_digest)
1050                         nvme_tcp_ddgst_update(queue->snd_hash, page,
1051                                         offset, ret);
1052
1053                 /*
1054                  * update the request iterator except for the last payload send
1055                  * in the request where we don't want to modify it as we may
1056                  * compete with the RX path completing the request.
1057                  */
1058                 if (req_data_sent + ret < req_data_len)
1059                         nvme_tcp_advance_req(req, ret);
1060
1061                 /* fully successful last send in current PDU */
1062                 if (last && ret == len) {
1063                         if (queue->data_digest) {
1064                                 nvme_tcp_ddgst_final(queue->snd_hash,
1065                                         &req->ddgst);
1066                                 req->state = NVME_TCP_SEND_DDGST;
1067                                 req->offset = 0;
1068                         } else {
1069                                 if (h2cdata_left)
1070                                         nvme_tcp_setup_h2c_data_pdu(req);
1071                                 else
1072                                         nvme_tcp_done_send_req(queue);
1073                         }
1074                         return 1;
1075                 }
1076         }
1077         return -EAGAIN;
1078 }
1079
1080 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1081 {
1082         struct nvme_tcp_queue *queue = req->queue;
1083         struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1084         struct bio_vec bvec;
1085         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
1086         bool inline_data = nvme_tcp_has_inline_data(req);
1087         u8 hdgst = nvme_tcp_hdgst_len(queue);
1088         int len = sizeof(*pdu) + hdgst - req->offset;
1089         int ret;
1090
1091         if (inline_data || nvme_tcp_queue_more(queue))
1092                 msg.msg_flags |= MSG_MORE;
1093         else
1094                 msg.msg_flags |= MSG_EOR;
1095
1096         if (queue->hdr_digest && !req->offset)
1097                 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1098
1099         bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1100         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1101         ret = sock_sendmsg(queue->sock, &msg);
1102         if (unlikely(ret <= 0))
1103                 return ret;
1104
1105         len -= ret;
1106         if (!len) {
1107                 if (inline_data) {
1108                         req->state = NVME_TCP_SEND_DATA;
1109                         if (queue->data_digest)
1110                                 crypto_ahash_init(queue->snd_hash);
1111                 } else {
1112                         nvme_tcp_done_send_req(queue);
1113                 }
1114                 return 1;
1115         }
1116         req->offset += ret;
1117
1118         return -EAGAIN;
1119 }
1120
1121 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1122 {
1123         struct nvme_tcp_queue *queue = req->queue;
1124         struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1125         struct bio_vec bvec;
1126         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
1127         u8 hdgst = nvme_tcp_hdgst_len(queue);
1128         int len = sizeof(*pdu) - req->offset + hdgst;
1129         int ret;
1130
1131         if (queue->hdr_digest && !req->offset)
1132                 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1133
1134         if (!req->h2cdata_left)
1135                 msg.msg_flags |= MSG_SPLICE_PAGES;
1136
1137         bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1138         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1139         ret = sock_sendmsg(queue->sock, &msg);
1140         if (unlikely(ret <= 0))
1141                 return ret;
1142
1143         len -= ret;
1144         if (!len) {
1145                 req->state = NVME_TCP_SEND_DATA;
1146                 if (queue->data_digest)
1147                         crypto_ahash_init(queue->snd_hash);
1148                 return 1;
1149         }
1150         req->offset += ret;
1151
1152         return -EAGAIN;
1153 }
1154
1155 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1156 {
1157         struct nvme_tcp_queue *queue = req->queue;
1158         size_t offset = req->offset;
1159         u32 h2cdata_left = req->h2cdata_left;
1160         int ret;
1161         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1162         struct kvec iov = {
1163                 .iov_base = (u8 *)&req->ddgst + req->offset,
1164                 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1165         };
1166
1167         if (nvme_tcp_queue_more(queue))
1168                 msg.msg_flags |= MSG_MORE;
1169         else
1170                 msg.msg_flags |= MSG_EOR;
1171
1172         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1173         if (unlikely(ret <= 0))
1174                 return ret;
1175
1176         if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1177                 if (h2cdata_left)
1178                         nvme_tcp_setup_h2c_data_pdu(req);
1179                 else
1180                         nvme_tcp_done_send_req(queue);
1181                 return 1;
1182         }
1183
1184         req->offset += ret;
1185         return -EAGAIN;
1186 }
1187
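/*
 * Drive the per-request send state machine (command PDU, H2CData PDU,
 * data, data digest).  Returns 1 if progress was made, 0 if there is
 * nothing to send or the socket is full, or a negative error code.
 */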
1188 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1189 {
1190         struct nvme_tcp_request *req;
1191         unsigned int noreclaim_flag;
1192         int ret = 1;
1193
1194         if (!queue->request) {
1195                 queue->request = nvme_tcp_fetch_request(queue);
1196                 if (!queue->request)
1197                         return 0;
1198         }
1199         req = queue->request;
1200
1201         noreclaim_flag = memalloc_noreclaim_save();
1202         if (req->state == NVME_TCP_SEND_CMD_PDU) {
1203                 ret = nvme_tcp_try_send_cmd_pdu(req);
1204                 if (ret <= 0)
1205                         goto done;
1206                 if (!nvme_tcp_has_inline_data(req))
1207                         goto out;
1208         }
1209
1210         if (req->state == NVME_TCP_SEND_H2C_PDU) {
1211                 ret = nvme_tcp_try_send_data_pdu(req);
1212                 if (ret <= 0)
1213                         goto done;
1214         }
1215
1216         if (req->state == NVME_TCP_SEND_DATA) {
1217                 ret = nvme_tcp_try_send_data(req);
1218                 if (ret <= 0)
1219                         goto done;
1220         }
1221
1222         if (req->state == NVME_TCP_SEND_DDGST)
1223                 ret = nvme_tcp_try_send_ddgst(req);
1224 done:
1225         if (ret == -EAGAIN) {
1226                 ret = 0;
1227         } else if (ret < 0) {
1228                 dev_err(queue->ctrl->ctrl.device,
1229                         "failed to send request, error %d\n", ret);
1230                 nvme_tcp_fail_request(queue->request);
1231                 nvme_tcp_done_send_req(queue);
1232         }
1233 out:
1234         memalloc_noreclaim_restore(noreclaim_flag);
1235         return ret;
1236 }
1237
1238 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1239 {
1240         struct socket *sock = queue->sock;
1241         struct sock *sk = sock->sk;
1242         read_descriptor_t rd_desc;
1243         int consumed;
1244
1245         rd_desc.arg.data = queue;
1246         rd_desc.count = 1;
1247         lock_sock(sk);
1248         queue->nr_cqe = 0;
1249         consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1250         release_sock(sk);
1251         return consumed;
1252 }
1253
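/*
 * Queue I/O worker: alternate between sending and receiving for up to
 * one millisecond, then requeue itself if there is still work pending.
 */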
1254 static void nvme_tcp_io_work(struct work_struct *w)
1255 {
1256         struct nvme_tcp_queue *queue =
1257                 container_of(w, struct nvme_tcp_queue, io_work);
1258         unsigned long deadline = jiffies + msecs_to_jiffies(1);
1259
1260         do {
1261                 bool pending = false;
1262                 int result;
1263
1264                 if (mutex_trylock(&queue->send_mutex)) {
1265                         result = nvme_tcp_try_send(queue);
1266                         mutex_unlock(&queue->send_mutex);
1267                         if (result > 0)
1268                                 pending = true;
1269                         else if (unlikely(result < 0))
1270                                 break;
1271                 }
1272
1273                 result = nvme_tcp_try_recv(queue);
1274                 if (result > 0)
1275                         pending = true;
1276                 else if (unlikely(result < 0))
1277                         return;
1278
1279                 if (!pending || !queue->rd_enabled)
1280                         return;
1281
1282         } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1283
1284         queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1285 }
1286
1287 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1288 {
1289         struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1290
1291         ahash_request_free(queue->rcv_hash);
1292         ahash_request_free(queue->snd_hash);
1293         crypto_free_ahash(tfm);
1294 }
1295
1296 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1297 {
1298         struct crypto_ahash *tfm;
1299
1300         tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1301         if (IS_ERR(tfm))
1302                 return PTR_ERR(tfm);
1303
1304         queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1305         if (!queue->snd_hash)
1306                 goto free_tfm;
1307         ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1308
1309         queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1310         if (!queue->rcv_hash)
1311                 goto free_snd_hash;
1312         ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1313
1314         return 0;
1315 free_snd_hash:
1316         ahash_request_free(queue->snd_hash);
1317 free_tfm:
1318         crypto_free_ahash(tfm);
1319         return -ENOMEM;
1320 }
1321
1322 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1323 {
1324         struct nvme_tcp_request *async = &ctrl->async_req;
1325
1326         page_frag_free(async->pdu);
1327 }
1328
1329 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1330 {
1331         struct nvme_tcp_queue *queue = &ctrl->queues[0];
1332         struct nvme_tcp_request *async = &ctrl->async_req;
1333         u8 hdgst = nvme_tcp_hdgst_len(queue);
1334
1335         async->pdu = page_frag_alloc(&queue->pf_cache,
1336                 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1337                 GFP_KERNEL | __GFP_ZERO);
1338         if (!async->pdu)
1339                 return -ENOMEM;
1340
1341         async->queue = &ctrl->queues[0];
1342         return 0;
1343 }
1344
1345 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1346 {
1347         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1348         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1349         unsigned int noreclaim_flag;
1350
1351         if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1352                 return;
1353
1354         if (queue->hdr_digest || queue->data_digest)
1355                 nvme_tcp_free_crypto(queue);
1356
1357         page_frag_cache_drain(&queue->pf_cache);
1358
1359         noreclaim_flag = memalloc_noreclaim_save();
1360         /* ->sock will be released by fput() */
1361         fput(queue->sock->file);
1362         queue->sock = NULL;
1363         memalloc_noreclaim_restore(noreclaim_flag);
1364
1365         kfree(queue->pdu);
1366         mutex_destroy(&queue->send_mutex);
1367         mutex_destroy(&queue->queue_lock);
1368 }
1369
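/*
 * Perform the NVMe/TCP initialize connection exchange: send an ICReq
 * PDU and validate the ICResp (PDU format version, digest negotiation,
 * CPDA and MAXH2CDATA) before the queue is used for capsules.
 */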
1370 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1371 {
1372         struct nvme_tcp_icreq_pdu *icreq;
1373         struct nvme_tcp_icresp_pdu *icresp;
1374         char cbuf[CMSG_LEN(sizeof(char))] = {};
1375         u8 ctype;
1376         struct msghdr msg = {};
1377         struct kvec iov;
1378         bool ctrl_hdgst, ctrl_ddgst;
1379         u32 maxh2cdata;
1380         int ret;
1381
1382         icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1383         if (!icreq)
1384                 return -ENOMEM;
1385
1386         icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1387         if (!icresp) {
1388                 ret = -ENOMEM;
1389                 goto free_icreq;
1390         }
1391
1392         icreq->hdr.type = nvme_tcp_icreq;
1393         icreq->hdr.hlen = sizeof(*icreq);
1394         icreq->hdr.pdo = 0;
1395         icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1396         icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1397         icreq->maxr2t = 0; /* single inflight r2t supported */
1398         icreq->hpda = 0; /* no alignment constraint */
1399         if (queue->hdr_digest)
1400                 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1401         if (queue->data_digest)
1402                 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1403
1404         iov.iov_base = icreq;
1405         iov.iov_len = sizeof(*icreq);
1406         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1407         if (ret < 0) {
1408                 pr_warn("queue %d: failed to send icreq, error %d\n",
1409                         nvme_tcp_queue_id(queue), ret);
1410                 goto free_icresp;
1411         }
1412
1413         memset(&msg, 0, sizeof(msg));
1414         iov.iov_base = icresp;
1415         iov.iov_len = sizeof(*icresp);
1416         if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
1417                 msg.msg_control = cbuf;
1418                 msg.msg_controllen = sizeof(cbuf);
1419         }
1420         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1421                         iov.iov_len, msg.msg_flags);
1422         if (ret < 0) {
1423                 pr_warn("queue %d: failed to receive icresp, error %d\n",
1424                         nvme_tcp_queue_id(queue), ret);
1425                 goto free_icresp;
1426         }
1427         ret = -ENOTCONN;
1428         if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
1429                 ctype = tls_get_record_type(queue->sock->sk,
1430                                             (struct cmsghdr *)cbuf);
1431                 if (ctype != TLS_RECORD_TYPE_DATA) {
1432                         pr_err("queue %d: unhandled TLS record %d\n",
1433                                nvme_tcp_queue_id(queue), ctype);
1434                         goto free_icresp;
1435                 }
1436         }
1437         ret = -EINVAL;
1438         if (icresp->hdr.type != nvme_tcp_icresp) {
1439                 pr_err("queue %d: bad type returned %d\n",
1440                         nvme_tcp_queue_id(queue), icresp->hdr.type);
1441                 goto free_icresp;
1442         }
1443
1444         if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1445                 pr_err("queue %d: bad pdu length returned %d\n",
1446                         nvme_tcp_queue_id(queue), icresp->hdr.plen);
1447                 goto free_icresp;
1448         }
1449
1450         if (icresp->pfv != NVME_TCP_PFV_1_0) {
1451                 pr_err("queue %d: bad pfv returned %d\n",
1452                         nvme_tcp_queue_id(queue), icresp->pfv);
1453                 goto free_icresp;
1454         }
1455
1456         ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1457         if ((queue->data_digest && !ctrl_ddgst) ||
1458             (!queue->data_digest && ctrl_ddgst)) {
1459                 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1460                         nvme_tcp_queue_id(queue),
1461                         queue->data_digest ? "enabled" : "disabled",
1462                         ctrl_ddgst ? "enabled" : "disabled");
1463                 goto free_icresp;
1464         }
1465
1466         ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1467         if ((queue->hdr_digest && !ctrl_hdgst) ||
1468             (!queue->hdr_digest && ctrl_hdgst)) {
1469                 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1470                         nvme_tcp_queue_id(queue),
1471                         queue->hdr_digest ? "enabled" : "disabled",
1472                         ctrl_hdgst ? "enabled" : "disabled");
1473                 goto free_icresp;
1474         }
1475
1476         if (icresp->cpda != 0) {
1477                 pr_err("queue %d: unsupported cpda returned %d\n",
1478                         nvme_tcp_queue_id(queue), icresp->cpda);
1479                 goto free_icresp;
1480         }
1481
1482         maxh2cdata = le32_to_cpu(icresp->maxdata);
1483         if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1484                 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1485                        nvme_tcp_queue_id(queue), maxh2cdata);
1486                 goto free_icresp;
1487         }
1488         queue->maxh2cdata = maxh2cdata;
1489
1490         ret = 0;
1491 free_icresp:
1492         kfree(icresp);
1493 free_icreq:
1494         kfree(icreq);
1495         return ret;
1496 }
1497
1498 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1499 {
1500         return nvme_tcp_queue_id(queue) == 0;
1501 }
1502
1503 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1504 {
1505         struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1506         int qid = nvme_tcp_queue_id(queue);
1507
1508         return !nvme_tcp_admin_queue(queue) &&
1509                 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1510 }
1511
1512 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1513 {
1514         struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1515         int qid = nvme_tcp_queue_id(queue);
1516
1517         return !nvme_tcp_admin_queue(queue) &&
1518                 !nvme_tcp_default_queue(queue) &&
1519                 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1520                           ctrl->io_queues[HCTX_TYPE_READ];
1521 }
1522
1523 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1524 {
1525         struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1526         int qid = nvme_tcp_queue_id(queue);
1527
1528         return !nvme_tcp_admin_queue(queue) &&
1529                 !nvme_tcp_default_queue(queue) &&
1530                 !nvme_tcp_read_queue(queue) &&
1531                 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1532                           ctrl->io_queues[HCTX_TYPE_READ] +
1533                           ctrl->io_queues[HCTX_TYPE_POLL];
1534 }
1535
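/*
 * Pin the queue's io_work to a CPU derived from the queue's index within its
 * class (default/read/poll), wrapping around the online CPU mask so that the
 * queues of each class are spread across the available cores.
 */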
1536 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1537 {
1538         struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1539         int qid = nvme_tcp_queue_id(queue);
1540         int n = 0;
1541
1542         if (nvme_tcp_default_queue(queue))
1543                 n = qid - 1;
1544         else if (nvme_tcp_read_queue(queue))
1545                 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1546         else if (nvme_tcp_poll_queue(queue))
1547                 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1548                                 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1549         queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1550 }
1551
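/*
 * Completion callback for the TLS handshake upcall.  On success the key
 * reported by the handshake is looked up and cached in the controller; on
 * failure the error is recorded in queue->tls_err.  Either way the waiter in
 * nvme_tcp_start_tls() is woken via tls_complete.
 */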
1552 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
1553 {
1554         struct nvme_tcp_queue *queue = data;
1555         struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1556         int qid = nvme_tcp_queue_id(queue);
1557         struct key *tls_key;
1558
1559         dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n",
1560                 qid, pskid, status);
1561
1562         if (status) {
1563                 queue->tls_err = -status;
1564                 goto out_complete;
1565         }
1566
1567         tls_key = key_lookup(pskid);
1568         if (IS_ERR(tls_key)) {
1569                 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
1570                          qid, pskid);
1571                 queue->tls_err = -ENOKEY;
1572         } else {
1573                 ctrl->ctrl.tls_key = tls_key;
1574                 queue->tls_err = 0;
1575         }
1576
1577 out_complete:
1578         complete(&queue->tls_complete);
1579 }
1580
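/*
 * Start a PSK-based TLS handshake on the queue's socket and wait for it to
 * finish.  The handshake itself runs out of line via the
 * tls_client_hello_psk() upcall; the wait is bounded by tls_handshake_timeout
 * and the handshake is cancelled if that timeout expires.
 */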
1581 static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
1582                               struct nvme_tcp_queue *queue,
1583                               key_serial_t pskid)
1584 {
1585         int qid = nvme_tcp_queue_id(queue);
1586         int ret;
1587         struct tls_handshake_args args;
1588         unsigned long tmo = tls_handshake_timeout * HZ;
1589         key_serial_t keyring = nvme_keyring_id();
1590
1591         dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n",
1592                 qid, pskid);
1593         memset(&args, 0, sizeof(args));
1594         args.ta_sock = queue->sock;
1595         args.ta_done = nvme_tcp_tls_done;
1596         args.ta_data = queue;
1597         args.ta_my_peerids[0] = pskid;
1598         args.ta_num_peerids = 1;
1599         if (nctrl->opts->keyring)
1600                 keyring = key_serial(nctrl->opts->keyring);
1601         args.ta_keyring = keyring;
1602         args.ta_timeout_ms = tls_handshake_timeout * 1000;
1603         queue->tls_err = -EOPNOTSUPP;
1604         init_completion(&queue->tls_complete);
1605         ret = tls_client_hello_psk(&args, GFP_KERNEL);
1606         if (ret) {
1607                 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n",
1608                         qid, ret);
1609                 return ret;
1610         }
1611         ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo);
1612         if (ret <= 0) {
1613                 if (ret == 0)
1614                         ret = -ETIMEDOUT;
1615
1616                 dev_err(nctrl->device,
1617                         "queue %d: TLS handshake failed, error %d\n",
1618                         qid, ret);
1619                 tls_handshake_cancel(queue->sock->sk);
1620         } else {
1621                 dev_dbg(nctrl->device,
1622                         "queue %d: TLS handshake complete, error %d\n",
1623                         qid, queue->tls_err);
1624                 ret = queue->tls_err;
1625         }
1626         return ret;
1627 }
1628
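/*
 * Allocate and connect a single queue: create the TCP socket, apply the
 * socket options derived from the connect parameters (priority, TOS, source
 * address/interface binding), optionally set up digests, connect, start TLS
 * if a PSK was provided, and finish with the NVMe/TCP ICReq/ICResp exchange.
 */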
1629 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
1630                                 key_serial_t pskid)
1631 {
1632         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1633         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1634         int ret, rcv_pdu_size;
1635         struct file *sock_file;
1636
1637         mutex_init(&queue->queue_lock);
1638         queue->ctrl = ctrl;
1639         init_llist_head(&queue->req_list);
1640         INIT_LIST_HEAD(&queue->send_list);
1641         mutex_init(&queue->send_mutex);
1642         INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1643
1644         if (qid > 0)
1645                 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1646         else
1647                 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1648                                                 NVME_TCP_ADMIN_CCSZ;
1649
1650         ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1651                         IPPROTO_TCP, &queue->sock);
1652         if (ret) {
1653                 dev_err(nctrl->device,
1654                         "failed to create socket: %d\n", ret);
1655                 goto err_destroy_mutex;
1656         }
1657
1658         sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1659         if (IS_ERR(sock_file)) {
1660                 ret = PTR_ERR(sock_file);
1661                 goto err_destroy_mutex;
1662         }
1663         nvme_tcp_reclassify_socket(queue->sock);
1664
1665         /* Single SYN retry */
1666         tcp_sock_set_syncnt(queue->sock->sk, 1);
1667
1668         /* Set TCP no delay */
1669         tcp_sock_set_nodelay(queue->sock->sk);
1670
1671         /*
1672          * Clean up whatever is sitting in the TCP transmit queue on socket
1673          * close. This is done to prevent stale data from being sent should
1674          * the network connection be restored before TCP times out.
1675          */
1676         sock_no_linger(queue->sock->sk);
1677
1678         if (so_priority > 0)
1679                 sock_set_priority(queue->sock->sk, so_priority);
1680
1681         /* Set socket type of service */
1682         if (nctrl->opts->tos >= 0)
1683                 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1684
1685         /* Set a 10 second timeout for icresp recvmsg */
1686         queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1687
1688         queue->sock->sk->sk_allocation = GFP_ATOMIC;
1689         queue->sock->sk->sk_use_task_frag = false;
1690         nvme_tcp_set_queue_io_cpu(queue);
1691         queue->request = NULL;
1692         queue->data_remaining = 0;
1693         queue->ddgst_remaining = 0;
1694         queue->pdu_remaining = 0;
1695         queue->pdu_offset = 0;
1696         sk_set_memalloc(queue->sock->sk);
1697
1698         if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1699                 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1700                         sizeof(ctrl->src_addr));
1701                 if (ret) {
1702                         dev_err(nctrl->device,
1703                                 "failed to bind queue %d socket %d\n",
1704                                 qid, ret);
1705                         goto err_sock;
1706                 }
1707         }
1708
1709         if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1710                 char *iface = nctrl->opts->host_iface;
1711                 sockptr_t optval = KERNEL_SOCKPTR(iface);
1712
1713                 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1714                                       optval, strlen(iface));
1715                 if (ret) {
1716                         dev_err(nctrl->device,
1717                           "failed to bind to interface %s queue %d err %d\n",
1718                           iface, qid, ret);
1719                         goto err_sock;
1720                 }
1721         }
1722
1723         queue->hdr_digest = nctrl->opts->hdr_digest;
1724         queue->data_digest = nctrl->opts->data_digest;
1725         if (queue->hdr_digest || queue->data_digest) {
1726                 ret = nvme_tcp_alloc_crypto(queue);
1727                 if (ret) {
1728                         dev_err(nctrl->device,
1729                                 "failed to allocate queue %d crypto\n", qid);
1730                         goto err_sock;
1731                 }
1732         }
1733
1734         rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1735                         nvme_tcp_hdgst_len(queue);
1736         queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1737         if (!queue->pdu) {
1738                 ret = -ENOMEM;
1739                 goto err_crypto;
1740         }
1741
1742         dev_dbg(nctrl->device, "connecting queue %d\n",
1743                         nvme_tcp_queue_id(queue));
1744
1745         ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1746                 sizeof(ctrl->addr), 0);
1747         if (ret) {
1748                 dev_err(nctrl->device,
1749                         "failed to connect socket: %d\n", ret);
1750                 goto err_rcv_pdu;
1751         }
1752
1753         /* If PSKs are configured, try to start TLS */
1754         if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
1755                 ret = nvme_tcp_start_tls(nctrl, queue, pskid);
1756                 if (ret)
1757                         goto err_init_connect;
1758         }
1759
1760         ret = nvme_tcp_init_connection(queue);
1761         if (ret)
1762                 goto err_init_connect;
1763
1764         set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1765
1766         return 0;
1767
1768 err_init_connect:
1769         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1770 err_rcv_pdu:
1771         kfree(queue->pdu);
1772 err_crypto:
1773         if (queue->hdr_digest || queue->data_digest)
1774                 nvme_tcp_free_crypto(queue);
1775 err_sock:
1776         /* ->sock will be released by fput() */
1777         fput(queue->sock->file);
1778         queue->sock = NULL;
1779 err_destroy_mutex:
1780         mutex_destroy(&queue->send_mutex);
1781         mutex_destroy(&queue->queue_lock);
1782         return ret;
1783 }
1784
1785 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1786 {
1787         struct socket *sock = queue->sock;
1788
1789         write_lock_bh(&sock->sk->sk_callback_lock);
1790         sock->sk->sk_user_data  = NULL;
1791         sock->sk->sk_data_ready = queue->data_ready;
1792         sock->sk->sk_state_change = queue->state_change;
1793         sock->sk->sk_write_space  = queue->write_space;
1794         write_unlock_bh(&sock->sk->sk_callback_lock);
1795 }
1796
1797 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1798 {
1799         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1800         nvme_tcp_restore_sock_ops(queue);
1801         cancel_work_sync(&queue->io_work);
1802 }
1803
1804 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1805 {
1806         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1807         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1808
1809         if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1810                 return;
1811
1812         mutex_lock(&queue->queue_lock);
1813         if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1814                 __nvme_tcp_stop_queue(queue);
1815         mutex_unlock(&queue->queue_lock);
1816 }
1817
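/*
 * Install nvme-tcp's socket callbacks (data_ready/state_change/write_space)
 * under sk_callback_lock, saving the originals so that
 * nvme_tcp_restore_sock_ops() can put them back when the queue is stopped.
 */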
1818 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1819 {
1820         write_lock_bh(&queue->sock->sk->sk_callback_lock);
1821         queue->sock->sk->sk_user_data = queue;
1822         queue->state_change = queue->sock->sk->sk_state_change;
1823         queue->data_ready = queue->sock->sk->sk_data_ready;
1824         queue->write_space = queue->sock->sk->sk_write_space;
1825         queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1826         queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1827         queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1828 #ifdef CONFIG_NET_RX_BUSY_POLL
1829         queue->sock->sk->sk_ll_usec = 1;
1830 #endif
1831         write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1832 }
1833
1834 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1835 {
1836         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1837         struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1838         int ret;
1839
1840         queue->rd_enabled = true;
1841         nvme_tcp_init_recv_ctx(queue);
1842         nvme_tcp_setup_sock_ops(queue);
1843
1844         if (idx)
1845                 ret = nvmf_connect_io_queue(nctrl, idx);
1846         else
1847                 ret = nvmf_connect_admin_queue(nctrl);
1848
1849         if (!ret) {
1850                 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
1851         } else {
1852                 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1853                         __nvme_tcp_stop_queue(queue);
1854                 dev_err(nctrl->device,
1855                         "failed to connect queue: %d ret=%d\n", idx, ret);
1856         }
1857         return ret;
1858 }
1859
1860 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1861 {
1862         if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1863                 cancel_work_sync(&ctrl->async_event_work);
1864                 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1865                 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1866         }
1867
1868         nvme_tcp_free_queue(ctrl, 0);
1869 }
1870
1871 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1872 {
1873         int i;
1874
1875         for (i = 1; i < ctrl->queue_count; i++)
1876                 nvme_tcp_free_queue(ctrl, i);
1877 }
1878
1879 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1880 {
1881         int i;
1882
1883         for (i = 1; i < ctrl->queue_count; i++)
1884                 nvme_tcp_stop_queue(ctrl, i);
1885 }
1886
1887 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
1888                                     int first, int last)
1889 {
1890         int i, ret;
1891
1892         for (i = first; i < last; i++) {
1893                 ret = nvme_tcp_start_queue(ctrl, i);
1894                 if (ret)
1895                         goto out_stop_queues;
1896         }
1897
1898         return 0;
1899
1900 out_stop_queues:
1901         for (i--; i >= first; i--)
1902                 nvme_tcp_stop_queue(ctrl, i);
1903         return ret;
1904 }
1905
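/*
 * For TLS-enabled connections the admin queue resolves the PSK to use: an
 * explicitly configured tls_key takes precedence, otherwise the default PSK
 * for this host/subsystem NQN pair is looked up in the keyring.
 */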
1906 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1907 {
1908         int ret;
1909         key_serial_t pskid = 0;
1910
1911         if (nvme_tcp_tls(ctrl)) {
1912                 if (ctrl->opts->tls_key)
1913                         pskid = key_serial(ctrl->opts->tls_key);
1914                 else
1915                         pskid = nvme_tls_psk_default(ctrl->opts->keyring,
1916                                                       ctrl->opts->host->nqn,
1917                                                       ctrl->opts->subsysnqn);
1918                 if (!pskid) {
1919                         dev_err(ctrl->device, "no valid PSK found\n");
1920                         return -ENOKEY;
1921                 }
1922         }
1923
1924         ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
1925         if (ret)
1926                 return ret;
1927
1928         ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1929         if (ret)
1930                 goto out_free_queue;
1931
1932         return 0;
1933
1934 out_free_queue:
1935         nvme_tcp_free_queue(ctrl, 0);
1936         return ret;
1937 }
1938
1939 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1940 {
1941         int i, ret;
1942
1943         if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
1944                 dev_err(ctrl->device, "no PSK negotiated\n");
1945                 return -ENOKEY;
1946         }
1947         for (i = 1; i < ctrl->queue_count; i++) {
1948                 ret = nvme_tcp_alloc_queue(ctrl, i,
1949                                 key_serial(ctrl->tls_key));
1950                 if (ret)
1951                         goto out_free_queues;
1952         }
1953
1954         return 0;
1955
1956 out_free_queues:
1957         for (i--; i >= 1; i--)
1958                 nvme_tcp_free_queue(ctrl, i);
1959
1960         return ret;
1961 }
1962
1963 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1964 {
1965         unsigned int nr_io_queues;
1966         int ret;
1967
1968         nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
1969         ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1970         if (ret)
1971                 return ret;
1972
1973         if (nr_io_queues == 0) {
1974                 dev_err(ctrl->device,
1975                         "unable to set any I/O queues\n");
1976                 return -ENOMEM;
1977         }
1978
1979         ctrl->queue_count = nr_io_queues + 1;
1980         dev_info(ctrl->device,
1981                 "creating %d I/O queues.\n", nr_io_queues);
1982
1983         nvmf_set_io_queues(ctrl->opts, nr_io_queues,
1984                            to_tcp_ctrl(ctrl)->io_queues);
1985         return __nvme_tcp_alloc_io_queues(ctrl);
1986 }
1987
1988 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1989 {
1990         nvme_tcp_stop_io_queues(ctrl);
1991         if (remove)
1992                 nvme_remove_io_tag_set(ctrl);
1993         nvme_tcp_free_io_queues(ctrl);
1994 }
1995
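/*
 * (Re)establish the I/O queues.  On a fresh controller this also allocates
 * the I/O tag set; on reconnect the existing tag set is reused and, if the
 * controller now exposes a different number of queues, nr_hw_queues is
 * updated under a queue freeze before any additional queues are started.
 */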
1996 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1997 {
1998         int ret, nr_queues;
1999
2000         ret = nvme_tcp_alloc_io_queues(ctrl);
2001         if (ret)
2002                 return ret;
2003
2004         if (new) {
2005                 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
2006                                 &nvme_tcp_mq_ops,
2007                                 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
2008                                 sizeof(struct nvme_tcp_request));
2009                 if (ret)
2010                         goto out_free_io_queues;
2011         }
2012
2013         /*
2014          * Only start I/O queues for which we have allocated the tagset
2015          * and limited it to the available queues. On reconnects, the
2016          * number of queues might have changed.
2017          */
2018         nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
2019         ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
2020         if (ret)
2021                 goto out_cleanup_connect_q;
2022
2023         if (!new) {
2024                 nvme_start_freeze(ctrl);
2025                 nvme_unquiesce_io_queues(ctrl);
2026                 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
2027                         /*
2028                          * If we timed out waiting for freeze we are likely to
2029                          * be stuck.  Fail the controller initialization just
2030                          * to be safe.
2031                          */
2032                         ret = -ENODEV;
2033                         nvme_unfreeze(ctrl);
2034                         goto out_wait_freeze_timed_out;
2035                 }
2036                 blk_mq_update_nr_hw_queues(ctrl->tagset,
2037                         ctrl->queue_count - 1);
2038                 nvme_unfreeze(ctrl);
2039         }
2040
2041         /*
2042          * If the number of queues has increased (reconnect case),
2043          * start all of the newly added queues now.
2044          */
2045         ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
2046                                        ctrl->tagset->nr_hw_queues + 1);
2047         if (ret)
2048                 goto out_wait_freeze_timed_out;
2049
2050         return 0;
2051
2052 out_wait_freeze_timed_out:
2053         nvme_quiesce_io_queues(ctrl);
2054         nvme_sync_io_queues(ctrl);
2055         nvme_tcp_stop_io_queues(ctrl);
2056 out_cleanup_connect_q:
2057         nvme_cancel_tagset(ctrl);
2058         if (new)
2059                 nvme_remove_io_tag_set(ctrl);
2060 out_free_io_queues:
2061         nvme_tcp_free_io_queues(ctrl);
2062         return ret;
2063 }
2064
2065 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
2066 {
2067         nvme_tcp_stop_queue(ctrl, 0);
2068         if (remove)
2069                 nvme_remove_admin_tag_set(ctrl);
2070         nvme_tcp_free_admin_queue(ctrl);
2071 }
2072
2073 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
2074 {
2075         int error;
2076
2077         error = nvme_tcp_alloc_admin_queue(ctrl);
2078         if (error)
2079                 return error;
2080
2081         if (new) {
2082                 error = nvme_alloc_admin_tag_set(ctrl,
2083                                 &to_tcp_ctrl(ctrl)->admin_tag_set,
2084                                 &nvme_tcp_admin_mq_ops,
2085                                 sizeof(struct nvme_tcp_request));
2086                 if (error)
2087                         goto out_free_queue;
2088         }
2089
2090         error = nvme_tcp_start_queue(ctrl, 0);
2091         if (error)
2092                 goto out_cleanup_tagset;
2093
2094         error = nvme_enable_ctrl(ctrl);
2095         if (error)
2096                 goto out_stop_queue;
2097
2098         nvme_unquiesce_admin_queue(ctrl);
2099
2100         error = nvme_init_ctrl_finish(ctrl, false);
2101         if (error)
2102                 goto out_quiesce_queue;
2103
2104         return 0;
2105
2106 out_quiesce_queue:
2107         nvme_quiesce_admin_queue(ctrl);
2108         blk_sync_queue(ctrl->admin_q);
2109 out_stop_queue:
2110         nvme_tcp_stop_queue(ctrl, 0);
2111         nvme_cancel_admin_tagset(ctrl);
2112 out_cleanup_tagset:
2113         if (new)
2114                 nvme_remove_admin_tag_set(ctrl);
2115 out_free_queue:
2116         nvme_tcp_free_admin_queue(ctrl);
2117         return error;
2118 }
2119
2120 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2121                 bool remove)
2122 {
2123         nvme_quiesce_admin_queue(ctrl);
2124         blk_sync_queue(ctrl->admin_q);
2125         nvme_tcp_stop_queue(ctrl, 0);
2126         nvme_cancel_admin_tagset(ctrl);
2127         if (remove)
2128                 nvme_unquiesce_admin_queue(ctrl);
2129         nvme_tcp_destroy_admin_queue(ctrl, remove);
2130 }
2131
2132 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2133                 bool remove)
2134 {
2135         if (ctrl->queue_count <= 1)
2136                 return;
2137         nvme_quiesce_admin_queue(ctrl);
2138         nvme_quiesce_io_queues(ctrl);
2139         nvme_sync_io_queues(ctrl);
2140         nvme_tcp_stop_io_queues(ctrl);
2141         nvme_cancel_tagset(ctrl);
2142         if (remove)
2143                 nvme_unquiesce_io_queues(ctrl);
2144         nvme_tcp_destroy_io_queues(ctrl, remove);
2145 }
2146
2147 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2148 {
2149         enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2150
2151         /* If we are resetting/deleting then do nothing */
2152         if (state != NVME_CTRL_CONNECTING) {
2153                 WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
2154                 return;
2155         }
2156
2157         if (nvmf_should_reconnect(ctrl)) {
2158                 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2159                         ctrl->opts->reconnect_delay);
2160                 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2161                                 ctrl->opts->reconnect_delay * HZ);
2162         } else {
2163                 dev_info(ctrl->device, "Removing controller...\n");
2164                 nvme_delete_ctrl(ctrl);
2165         }
2166 }
2167
2168 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2169 {
2170         struct nvmf_ctrl_options *opts = ctrl->opts;
2171         int ret;
2172
2173         ret = nvme_tcp_configure_admin_queue(ctrl, new);
2174         if (ret)
2175                 return ret;
2176
2177         if (ctrl->icdoff) {
2178                 ret = -EOPNOTSUPP;
2179                 dev_err(ctrl->device, "icdoff is not supported!\n");
2180                 goto destroy_admin;
2181         }
2182
2183         if (!nvme_ctrl_sgl_supported(ctrl)) {
2184                 ret = -EOPNOTSUPP;
2185                 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2186                 goto destroy_admin;
2187         }
2188
2189         if (opts->queue_size > ctrl->sqsize + 1)
2190                 dev_warn(ctrl->device,
2191                         "queue_size %zu > ctrl sqsize %u, clamping down\n",
2192                         opts->queue_size, ctrl->sqsize + 1);
2193
2194         if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2195                 dev_warn(ctrl->device,
2196                         "sqsize %u > ctrl maxcmd %u, clamping down\n",
2197                         ctrl->sqsize + 1, ctrl->maxcmd);
2198                 ctrl->sqsize = ctrl->maxcmd - 1;
2199         }
2200
2201         if (ctrl->queue_count > 1) {
2202                 ret = nvme_tcp_configure_io_queues(ctrl, new);
2203                 if (ret)
2204                         goto destroy_admin;
2205         }
2206
2207         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2208                 /*
2209                  * A state change failure is ok if we started ctrl delete,
2210                  * except during creation of a new controller, which must
2211                  * not race with the teardown flow.
2212                  */
2213                 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2214
2215                 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2216                              state != NVME_CTRL_DELETING_NOIO);
2217                 WARN_ON_ONCE(new);
2218                 ret = -EINVAL;
2219                 goto destroy_io;
2220         }
2221
2222         nvme_start_ctrl(ctrl);
2223         return 0;
2224
2225 destroy_io:
2226         if (ctrl->queue_count > 1) {
2227                 nvme_quiesce_io_queues(ctrl);
2228                 nvme_sync_io_queues(ctrl);
2229                 nvme_tcp_stop_io_queues(ctrl);
2230                 nvme_cancel_tagset(ctrl);
2231                 nvme_tcp_destroy_io_queues(ctrl, new);
2232         }
2233 destroy_admin:
2234         nvme_stop_keep_alive(ctrl);
2235         nvme_tcp_teardown_admin_queue(ctrl, false);
2236         return ret;
2237 }
2238
2239 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2240 {
2241         struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2242                         struct nvme_tcp_ctrl, connect_work);
2243         struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2244
2245         ++ctrl->nr_reconnects;
2246
2247         if (nvme_tcp_setup_ctrl(ctrl, false))
2248                 goto requeue;
2249
2250         dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2251                         ctrl->nr_reconnects);
2252
2253         ctrl->nr_reconnects = 0;
2254
2255         return;
2256
2257 requeue:
2258         dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2259                         ctrl->nr_reconnects);
2260         nvme_tcp_reconnect_or_remove(ctrl);
2261 }
2262
2263 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2264 {
2265         struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2266                                 struct nvme_tcp_ctrl, err_work);
2267         struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2268
2269         nvme_stop_keep_alive(ctrl);
2270         flush_work(&ctrl->async_event_work);
2271         nvme_tcp_teardown_io_queues(ctrl, false);
2272         /* unquiesce so pending requests fail fast */
2273         nvme_unquiesce_io_queues(ctrl);
2274         nvme_tcp_teardown_admin_queue(ctrl, false);
2275         nvme_unquiesce_admin_queue(ctrl);
2276         nvme_auth_stop(ctrl);
2277
2278         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2279                 /* state change failure is ok if we started ctrl delete */
2280                 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2281
2282                 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2283                              state != NVME_CTRL_DELETING_NOIO);
2284                 return;
2285         }
2286
2287         nvme_tcp_reconnect_or_remove(ctrl);
2288 }
2289
2290 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2291 {
2292         nvme_tcp_teardown_io_queues(ctrl, shutdown);
2293         nvme_quiesce_admin_queue(ctrl);
2294         nvme_disable_ctrl(ctrl, shutdown);
2295         nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2296 }
2297
2298 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2299 {
2300         nvme_tcp_teardown_ctrl(ctrl, true);
2301 }
2302
2303 static void nvme_reset_ctrl_work(struct work_struct *work)
2304 {
2305         struct nvme_ctrl *ctrl =
2306                 container_of(work, struct nvme_ctrl, reset_work);
2307
2308         nvme_stop_ctrl(ctrl);
2309         nvme_tcp_teardown_ctrl(ctrl, false);
2310
2311         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2312                 /* state change failure is ok if we started ctrl delete */
2313                 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2314
2315                 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2316                              state != NVME_CTRL_DELETING_NOIO);
2317                 return;
2318         }
2319
2320         if (nvme_tcp_setup_ctrl(ctrl, false))
2321                 goto out_fail;
2322
2323         return;
2324
2325 out_fail:
2326         ++ctrl->nr_reconnects;
2327         nvme_tcp_reconnect_or_remove(ctrl);
2328 }
2329
2330 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2331 {
2332         flush_work(&to_tcp_ctrl(ctrl)->err_work);
2333         cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2334 }
2335
2336 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2337 {
2338         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2339
2340         if (list_empty(&ctrl->list))
2341                 goto free_ctrl;
2342
2343         mutex_lock(&nvme_tcp_ctrl_mutex);
2344         list_del(&ctrl->list);
2345         mutex_unlock(&nvme_tcp_ctrl_mutex);
2346
2347         nvmf_free_options(nctrl->opts);
2348 free_ctrl:
2349         kfree(ctrl->queues);
2350         kfree(ctrl);
2351 }
2352
2353 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2354 {
2355         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2356
2357         sg->addr = 0;
2358         sg->length = 0;
2359         sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2360                         NVME_SGL_FMT_TRANSPORT_A;
2361 }
2362
2363 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2364                 struct nvme_command *c, u32 data_len)
2365 {
2366         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2367
2368         sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2369         sg->length = cpu_to_le32(data_len);
2370         sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2371 }
2372
2373 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2374                 u32 data_len)
2375 {
2376         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2377
2378         sg->addr = 0;
2379         sg->length = cpu_to_le32(data_len);
2380         sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2381                         NVME_SGL_FMT_TRANSPORT_A;
2382 }
2383
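/*
 * Submit the async event (AER) command.  This request lives outside the
 * admin tag set, so its PDU is built by hand here and it uses the reserved
 * NVME_AQ_BLK_MQ_DEPTH command id on the admin queue.
 */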
2384 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2385 {
2386         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2387         struct nvme_tcp_queue *queue = &ctrl->queues[0];
2388         struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2389         struct nvme_command *cmd = &pdu->cmd;
2390         u8 hdgst = nvme_tcp_hdgst_len(queue);
2391
2392         memset(pdu, 0, sizeof(*pdu));
2393         pdu->hdr.type = nvme_tcp_cmd;
2394         if (queue->hdr_digest)
2395                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2396         pdu->hdr.hlen = sizeof(*pdu);
2397         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2398
2399         cmd->common.opcode = nvme_admin_async_event;
2400         cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2401         cmd->common.flags |= NVME_CMD_SGL_METABUF;
2402         nvme_tcp_set_sg_null(cmd);
2403
2404         ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2405         ctrl->async_req.offset = 0;
2406         ctrl->async_req.curr_bio = NULL;
2407         ctrl->async_req.data_len = 0;
2408
2409         nvme_tcp_queue_request(&ctrl->async_req, true, true);
2410 }
2411
2412 static void nvme_tcp_complete_timed_out(struct request *rq)
2413 {
2414         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2415         struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2416
2417         nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2418         nvmf_complete_timed_out_request(rq);
2419 }
2420
2421 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2422 {
2423         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2424         struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2425         struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2426         struct nvme_command *cmd = &pdu->cmd;
2427         int qid = nvme_tcp_queue_id(req->queue);
2428
2429         dev_warn(ctrl->device,
2430                  "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
2431                  rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
2432                  nvme_fabrics_opcode_str(qid, cmd), qid);
2433
2434         if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
2435                 /*
2436                  * If we are resetting, connecting or deleting we should
2437                  * complete immediately because we may block controller
2438                  * teardown or setup sequence
2439                  * - ctrl disable/shutdown fabrics requests
2440                  * - connect requests
2441                  * - initialization admin requests
2442                  * - I/O requests that entered after unquiescing and
2443                  *   the controller stopped responding
2444                  *
2445                  * All other requests should be cancelled by the error
2446                  * recovery work, so it's fine that we fail it here.
2447                  */
2448                 nvme_tcp_complete_timed_out(rq);
2449                 return BLK_EH_DONE;
2450         }
2451
2452         /*
2453          * LIVE state should trigger the normal error recovery which will
2454          * handle completing this request.
2455          */
2456         nvme_tcp_error_recovery(ctrl);
2457         return BLK_EH_RESET_TIMER;
2458 }
2459
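/*
 * Select the SGL descriptor for the command: a null descriptor when there is
 * no payload, an in-capsule (inline) descriptor for writes small enough to
 * fit in the command capsule, and a transport descriptor referencing
 * host-resident data otherwise.
 */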
2460 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2461                         struct request *rq)
2462 {
2463         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2464         struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2465         struct nvme_command *c = &pdu->cmd;
2466
2467         c->common.flags |= NVME_CMD_SGL_METABUF;
2468
2469         if (!blk_rq_nr_phys_segments(rq))
2470                 nvme_tcp_set_sg_null(c);
2471         else if (rq_data_dir(rq) == WRITE &&
2472             req->data_len <= nvme_tcp_inline_data_size(req))
2473                 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2474         else
2475                 nvme_tcp_set_sg_host_data(c, req->data_len);
2476
2477         return 0;
2478 }
2479
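/*
 * Build the command PDU for a request.  plen covers the header, an optional
 * header digest, pdu_len bytes of in-capsule data for small writes and an
 * optional data digest; e.g. with both digests enabled (4-byte CRC32C each)
 * and 512 bytes of inline data, plen = 72 + 4 + 512 + 4 = 592.
 */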
2480 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2481                 struct request *rq)
2482 {
2483         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2484         struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2485         struct nvme_tcp_queue *queue = req->queue;
2486         u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2487         blk_status_t ret;
2488
2489         ret = nvme_setup_cmd(ns, rq);
2490         if (ret)
2491                 return ret;
2492
2493         req->state = NVME_TCP_SEND_CMD_PDU;
2494         req->status = cpu_to_le16(NVME_SC_SUCCESS);
2495         req->offset = 0;
2496         req->data_sent = 0;
2497         req->pdu_len = 0;
2498         req->pdu_sent = 0;
2499         req->h2cdata_left = 0;
2500         req->data_len = blk_rq_nr_phys_segments(rq) ?
2501                                 blk_rq_payload_bytes(rq) : 0;
2502         req->curr_bio = rq->bio;
2503         if (req->curr_bio && req->data_len)
2504                 nvme_tcp_init_iter(req, rq_data_dir(rq));
2505
2506         if (rq_data_dir(rq) == WRITE &&
2507             req->data_len <= nvme_tcp_inline_data_size(req))
2508                 req->pdu_len = req->data_len;
2509
2510         pdu->hdr.type = nvme_tcp_cmd;
2511         pdu->hdr.flags = 0;
2512         if (queue->hdr_digest)
2513                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2514         if (queue->data_digest && req->pdu_len) {
2515                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2516                 ddgst = nvme_tcp_ddgst_len(queue);
2517         }
2518         pdu->hdr.hlen = sizeof(*pdu);
2519         pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2520         pdu->hdr.plen =
2521                 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2522
2523         ret = nvme_tcp_map_data(queue, rq);
2524         if (unlikely(ret)) {
2525                 nvme_cleanup_cmd(rq);
2526                 dev_err(queue->ctrl->ctrl.device,
2527                         "Failed to map data (%d)\n", ret);
2528                 return ret;
2529         }
2530
2531         return 0;
2532 }
2533
2534 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2535 {
2536         struct nvme_tcp_queue *queue = hctx->driver_data;
2537
2538         if (!llist_empty(&queue->req_list))
2539                 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2540 }
2541
2542 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2543                 const struct blk_mq_queue_data *bd)
2544 {
2545         struct nvme_ns *ns = hctx->queue->queuedata;
2546         struct nvme_tcp_queue *queue = hctx->driver_data;
2547         struct request *rq = bd->rq;
2548         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2549         bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2550         blk_status_t ret;
2551
2552         if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2553                 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2554
2555         ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2556         if (unlikely(ret))
2557                 return ret;
2558
2559         nvme_start_request(rq);
2560
2561         nvme_tcp_queue_request(req, true, bd->last);
2562
2563         return BLK_STS_OK;
2564 }
2565
2566 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2567 {
2568         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
2569
2570         nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2571 }
2572
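/*
 * blk-mq poll callback for poll queues: busy-poll the socket if nothing has
 * been received yet, then reap whatever completions have arrived.  Returns
 * the number of completions processed in this pass.
 */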
2573 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2574 {
2575         struct nvme_tcp_queue *queue = hctx->driver_data;
2576         struct sock *sk = queue->sock->sk;
2577
2578         if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2579                 return 0;
2580
2581         set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2582         if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2583                 sk_busy_loop(sk, true);
2584         nvme_tcp_try_recv(queue);
2585         clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2586         return queue->nr_cqe;
2587 }
2588
2589 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2590 {
2591         struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2592         struct sockaddr_storage src_addr;
2593         int ret, len;
2594
2595         len = nvmf_get_address(ctrl, buf, size);
2596
2597         mutex_lock(&queue->queue_lock);
2598
2599         if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2600                 goto done;
2601         ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
2602         if (ret > 0) {
2603                 if (len > 0)
2604                         len--; /* strip trailing newline */
2605                 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
2606                                 (len) ? "," : "", &src_addr);
2607         }
2608 done:
2609         mutex_unlock(&queue->queue_lock);
2610
2611         return len;
2612 }
2613
2614 static const struct blk_mq_ops nvme_tcp_mq_ops = {
2615         .queue_rq       = nvme_tcp_queue_rq,
2616         .commit_rqs     = nvme_tcp_commit_rqs,
2617         .complete       = nvme_complete_rq,
2618         .init_request   = nvme_tcp_init_request,
2619         .exit_request   = nvme_tcp_exit_request,
2620         .init_hctx      = nvme_tcp_init_hctx,
2621         .timeout        = nvme_tcp_timeout,
2622         .map_queues     = nvme_tcp_map_queues,
2623         .poll           = nvme_tcp_poll,
2624 };
2625
2626 static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2627         .queue_rq       = nvme_tcp_queue_rq,
2628         .complete       = nvme_complete_rq,
2629         .init_request   = nvme_tcp_init_request,
2630         .exit_request   = nvme_tcp_exit_request,
2631         .init_hctx      = nvme_tcp_init_admin_hctx,
2632         .timeout        = nvme_tcp_timeout,
2633 };
2634
2635 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2636         .name                   = "tcp",
2637         .module                 = THIS_MODULE,
2638         .flags                  = NVME_F_FABRICS | NVME_F_BLOCKING,
2639         .reg_read32             = nvmf_reg_read32,
2640         .reg_read64             = nvmf_reg_read64,
2641         .reg_write32            = nvmf_reg_write32,
2642         .free_ctrl              = nvme_tcp_free_ctrl,
2643         .submit_async_event     = nvme_tcp_submit_async_event,
2644         .delete_ctrl            = nvme_tcp_delete_ctrl,
2645         .get_address            = nvme_tcp_get_address,
2646         .stop_ctrl              = nvme_tcp_stop_ctrl,
2647 };
2648
2649 static bool
2650 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2651 {
2652         struct nvme_tcp_ctrl *ctrl;
2653         bool found = false;
2654
2655         mutex_lock(&nvme_tcp_ctrl_mutex);
2656         list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2657                 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2658                 if (found)
2659                         break;
2660         }
2661         mutex_unlock(&nvme_tcp_ctrl_mutex);
2662
2663         return found;
2664 }
2665
2666 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2667                 struct nvmf_ctrl_options *opts)
2668 {
2669         struct nvme_tcp_ctrl *ctrl;
2670         int ret;
2671
2672         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2673         if (!ctrl)
2674                 return ERR_PTR(-ENOMEM);
2675
2676         INIT_LIST_HEAD(&ctrl->list);
2677         ctrl->ctrl.opts = opts;
2678         ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2679                                 opts->nr_poll_queues + 1;
2680         ctrl->ctrl.sqsize = opts->queue_size - 1;
2681         ctrl->ctrl.kato = opts->kato;
2682
2683         INIT_DELAYED_WORK(&ctrl->connect_work,
2684                         nvme_tcp_reconnect_ctrl_work);
2685         INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2686         INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2687
2688         if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2689                 opts->trsvcid =
2690                         kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2691                 if (!opts->trsvcid) {
2692                         ret = -ENOMEM;
2693                         goto out_free_ctrl;
2694                 }
2695                 opts->mask |= NVMF_OPT_TRSVCID;
2696         }
2697
2698         ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2699                         opts->traddr, opts->trsvcid, &ctrl->addr);
2700         if (ret) {
2701                 pr_err("malformed address passed: %s:%s\n",
2702                         opts->traddr, opts->trsvcid);
2703                 goto out_free_ctrl;
2704         }
2705
2706         if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2707                 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2708                         opts->host_traddr, NULL, &ctrl->src_addr);
2709                 if (ret) {
2710                         pr_err("malformed src address passed: %s\n",
2711                                opts->host_traddr);
2712                         goto out_free_ctrl;
2713                 }
2714         }
2715
2716         if (opts->mask & NVMF_OPT_HOST_IFACE) {
2717                 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2718                         pr_err("invalid interface passed: %s\n",
2719                                opts->host_iface);
2720                         ret = -ENODEV;
2721                         goto out_free_ctrl;
2722                 }
2723         }
2724
2725         if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2726                 ret = -EALREADY;
2727                 goto out_free_ctrl;
2728         }
2729
2730         ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2731                                 GFP_KERNEL);
2732         if (!ctrl->queues) {
2733                 ret = -ENOMEM;
2734                 goto out_free_ctrl;
2735         }
2736
2737         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2738         if (ret)
2739                 goto out_kfree_queues;
2740
2741         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2742                 WARN_ON_ONCE(1);
2743                 ret = -EINTR;
2744                 goto out_uninit_ctrl;
2745         }
2746
2747         ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2748         if (ret)
2749                 goto out_uninit_ctrl;
2750
2751         dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
2752                 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
2753
2754         mutex_lock(&nvme_tcp_ctrl_mutex);
2755         list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2756         mutex_unlock(&nvme_tcp_ctrl_mutex);
2757
2758         return &ctrl->ctrl;
2759
2760 out_uninit_ctrl:
2761         nvme_uninit_ctrl(&ctrl->ctrl);
2762         nvme_put_ctrl(&ctrl->ctrl);
2763         if (ret > 0)
2764                 ret = -EIO;
2765         return ERR_PTR(ret);
2766 out_kfree_queues:
2767         kfree(ctrl->queues);
2768 out_free_ctrl:
2769         kfree(ctrl);
2770         return ERR_PTR(ret);
2771 }
2772
2773 static struct nvmf_transport_ops nvme_tcp_transport = {
2774         .name           = "tcp",
2775         .module         = THIS_MODULE,
2776         .required_opts  = NVMF_OPT_TRADDR,
2777         .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2778                           NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2779                           NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2780                           NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2781                           NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
2782                           NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
2783         .create_ctrl    = nvme_tcp_create_ctrl,
2784 };
2785
2786 static int __init nvme_tcp_init_module(void)
2787 {
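        /*
         * Compile-time checks that the PDU definitions match the wire sizes
         * mandated by the NVMe/TCP specification.
         */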
2788         BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
2789         BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
2790         BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
2791         BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
2792         BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
2793         BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
2794         BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
2795         BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
2796
2797         nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2798                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2799         if (!nvme_tcp_wq)
2800                 return -ENOMEM;
2801
2802         nvmf_register_transport(&nvme_tcp_transport);
2803         return 0;
2804 }
2805
2806 static void __exit nvme_tcp_cleanup_module(void)
2807 {
2808         struct nvme_tcp_ctrl *ctrl;
2809
2810         nvmf_unregister_transport(&nvme_tcp_transport);
2811
2812         mutex_lock(&nvme_tcp_ctrl_mutex);
2813         list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2814                 nvme_delete_ctrl(&ctrl->ctrl);
2815         mutex_unlock(&nvme_tcp_ctrl_mutex);
2816         flush_workqueue(nvme_delete_wq);
2817
2818         destroy_workqueue(nvme_tcp_wq);
2819 }
2820
2821 module_init(nvme_tcp_init_module);
2822 module_exit(nvme_tcp_cleanup_module);
2823
2824 MODULE_DESCRIPTION("NVMe host TCP transport driver");
2825 MODULE_LICENSE("GPL v2");