mptcp: corner case locking for rx path fields initialization
net/mptcp/subflow.c
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
                                  enum linux_mptcp_mib_field field)
{
        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        pr_debug("subflow_req=%p", subflow_req);

        if (subflow_req->msk)
                sock_put((struct sock *)subflow_req->msk);

        mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
                                  void *hmac)
{
        u8 msg[8];

        put_unaligned_be32(nonce1, &msg[0]);
        put_unaligned_be32(nonce2, &msg[4]);

        mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}
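
/* Note on subflow_generate_hmac(): per RFC 8684 section 3.2, MP_JOIN
 * authentication runs HMAC-SHA256, keyed with both handshake keys, over the
 * two nonces in big-endian order. The SYN/ACK carries only the leftmost
 * 64 bits of the digest (the "truncated HMAC", see thmac below), while the
 * third ACK carries the leftmost MPTCPOPT_HMAC_LEN bytes.
 */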

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
        return mptcp_is_fully_established((void *)msk) &&
                ((mptcp_pm_is_userspace(msk) &&
                  mptcp_userspace_pm_active(msk)) ||
                 READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
        struct mptcp_sock *msk = subflow_req->msk;
        u8 hmac[SHA256_DIGEST_SIZE];

        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

        subflow_generate_hmac(msk->local_key, msk->remote_key,
                              subflow_req->local_nonce,
                              subflow_req->remote_nonce, hmac);

        subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_sock *msk;
        int local_id;

        msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return NULL;
        }

        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
                return NULL;
        }
        subflow_req->local_id = local_id;

        return msk;
}
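
/* Note: mptcp_token_get_sock() returns the msk with a reference held; the
 * request socket owns that reference via subflow_req->msk until it is either
 * dropped in subflow_req_destructor() or handed over to the child subflow
 * context in subflow_syn_recv_sock().
 */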

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
        subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
        subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
        subflow_req->msk = NULL;
        mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
        return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
        struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

        if (mpext) {
                memset(mpext, 0, sizeof(*mpext));
                mpext->reset_reason = reason;
        }
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
                             const struct sock *sk_listener,
                             struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;
        bool opt_mp_capable, opt_mp_join;

        pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
                return -EINVAL;
#endif

        mptcp_get_options(skb, &mp_opt);

        opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
        opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
        if (opt_mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

                if (opt_mp_join)
                        return 0;
        } else if (opt_mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
        }

        if (opt_mp_capable && listener->request_mptcp) {
                int err, retries = MPTCP_TOKEN_MAX_RETRIES;

                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
                do {
                        get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
                } while (subflow_req->local_key == 0);

                if (unlikely(req->syncookie)) {
                        mptcp_crypto_key_sha(subflow_req->local_key,
                                             &subflow_req->token,
                                             &subflow_req->idsn);
                        if (mptcp_token_exists(subflow_req->token)) {
                                if (retries-- > 0)
                                        goto again;
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
                        } else {
                                subflow_req->mp_capable = 1;
                        }
                        return 0;
                }

                err = mptcp_token_new_request(req);
                if (err == 0)
                        subflow_req->mp_capable = 1;
                else if (retries-- > 0)
                        goto again;
                else
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

        } else if (opt_mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
                subflow_req->backup = mp_opt.backup;
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
                subflow_req->msk = subflow_token_join_request(req);

                /* Can't fall back to TCP in this case. */
                if (!subflow_req->msk) {
                        subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                        return -EPERM;
                }

                if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
                        pr_debug("syn inet_sport=%d %d",
                                 ntohs(inet_sk(sk_listener)->inet_sport),
                                 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
                        if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
                                return -EPERM;
                        }
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
                }

                subflow_req_create_thmac(subflow_req);

                if (unlikely(req->syncookie)) {
                        if (mptcp_can_accept_new_subflow(subflow_req->msk))
                                subflow_init_req_cookie_join_save(subflow_req, skb);
                        else
                                return -EPERM;
                }

                pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
                         subflow_req->remote_nonce, subflow_req->msk);
        }

        return 0;
}
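
/* Note: returning 0 lets the handshake proceed even when
 * subflow_req->mp_capable could not be set - the connection then simply
 * degrades to plain TCP (fallback). A negative return makes the caller
 * (subflow_v4/v6_route_req below) release the dst and send a reset; that is
 * reserved for cases where fallback is not acceptable, e.g. a failed MP_JOIN.
 */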

int mptcp_subflow_init_cookie_req(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;
        bool opt_mp_capable, opt_mp_join;
        int err;

        subflow_init_req(req, sk_listener);
        mptcp_get_options(skb, &mp_opt);

        opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
        opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
        if (opt_mp_capable && opt_mp_join)
                return -EINVAL;

        if (opt_mp_capable && listener->request_mptcp) {
                if (mp_opt.sndr_key == 0)
                        return -EINVAL;

                subflow_req->local_key = mp_opt.rcvr_key;
                err = mptcp_token_new_request(req);
                if (err)
                        return err;

                subflow_req->mp_capable = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        } else if (opt_mp_join && listener->request_mptcp) {
                if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
                        return -EINVAL;

                subflow_req->mp_join = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
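
/* Note: the skb processed here is the final ACK of a cookie handshake, one
 * sequence number past the original SYN; hence ssn_offset is set to
 * TCP_SKB_CB(skb)->seq - 1, matching the SYN seq used in the non-cookie
 * path above.
 */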

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
                                              struct request_sock *req)
{
        struct dst_entry *dst;
        int err;

        tcp_rsk(req)->is_mptcp = 1;
        subflow_init_req(req, sk);

        dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
        if (!dst)
                return NULL;

        err = subflow_check_req(req, sk, skb);
        if (err == 0)
                return dst;

        dst_release(dst);
        if (!req->syncookie)
                tcp_request_sock_ops.send_reset(sk, skb);
        return NULL;
}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
                                enum tcp_synack_type synack_type)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct inet_request_sock *ireq = inet_rsk(req);

        /* clear tstamp_ok, as needed depending on cookie */
        if (foc && foc->len > -1)
                ireq->tstamp_ok = 0;

        if (synack_type == TCP_SYNACK_FASTOPEN)
                mptcp_fastopen_subflow_synack_set_params(subflow, req);
}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                                  struct flowi *fl,
                                  struct request_sock *req,
                                  struct tcp_fastopen_cookie *foc,
                                  enum tcp_synack_type synack_type,
                                  struct sk_buff *syn_skb)
{
        subflow_prep_synack(sk, req, foc, synack_type);

        return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
                                                     synack_type, syn_skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                                  struct flowi *fl,
                                  struct request_sock *req,
                                  struct tcp_fastopen_cookie *foc,
                                  enum tcp_synack_type synack_type,
                                  struct sk_buff *syn_skb)
{
        subflow_prep_synack(sk, req, foc, synack_type);

        return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
                                                     synack_type, syn_skb);
}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
                                              struct request_sock *req)
{
        struct dst_entry *dst;
        int err;

        tcp_rsk(req)->is_mptcp = 1;
        subflow_init_req(req, sk);

        dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
        if (!dst)
                return NULL;

        err = subflow_check_req(req, sk, skb);
        if (err == 0)
                return dst;

        dst_release(dst);
        if (!req->syncookie)
                tcp6_request_sock_ops.send_reset(sk, skb);
        return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
        u8 hmac[SHA256_DIGEST_SIZE];
        u64 thmac;

        subflow_generate_hmac(subflow->remote_key, subflow->local_key,
                              subflow->remote_nonce, subflow->local_nonce,
                              hmac);

        thmac = get_unaligned_be64(hmac);
        pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
                 subflow, subflow->token, thmac, subflow->thmac);

        return thmac == subflow->thmac;
}
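
/* Note: the key/nonce order above is mirrored (remote first) with respect
 * to the subflow_generate_hmac() calls on the sending side, so each peer
 * verifies the digest the other one computed.
 */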

void mptcp_subflow_reset(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;

        /* mptcp_mp_fail_no_response() can reach here on an already closed
         * socket
         */
        if (ssk->sk_state == TCP_CLOSE)
                return;

        /* must hold: tcp_done() could drop last reference on parent */
        sock_hold(sk);

        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
                mptcp_schedule_work(sk);

        sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
        return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_sync_state(struct sock *sk, int state)
{
        struct mptcp_subflow_context *subflow;
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct sock *ssk = msk->first;

        subflow = mptcp_subflow_ctx(ssk);
        __mptcp_propagate_sndbuf(sk, ssk);
        if (!msk->rcvspace_init)
                mptcp_rcv_space_init(msk, ssk);

        if (sk->sk_state == TCP_SYN_SENT) {
                /* subflow->idsn is always available in TCP_SYN_SENT state,
                 * even for the FASTOPEN scenarios
                 */
                WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
                WRITE_ONCE(msk->snd_nxt, msk->write_seq);
                mptcp_set_state(sk, state);
                sk->sk_state_change(sk);
        }
}
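
/* Note: __mptcp_sync_state() is expected to run with the msk data lock
 * held - see mptcp_propagate_state() below - so that write_seq/snd_nxt
 * and the rcv space initialization cannot race with the data path.
 */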

static void subflow_set_remote_key(struct mptcp_sock *msk,
                                   struct mptcp_subflow_context *subflow,
                                   const struct mptcp_options_received *mp_opt)
{
        /* active MPC subflow will reach here multiple times:
         * at subflow_finish_connect() time and at 4th ack time
         */
        if (subflow->remote_key_valid)
                return;

        subflow->remote_key_valid = 1;
        subflow->remote_key = mp_opt->sndr_key;
        mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
        subflow->iasn++;

        WRITE_ONCE(msk->remote_key, subflow->remote_key);
        WRITE_ONCE(msk->ack_seq, subflow->iasn);
        WRITE_ONCE(msk->can_ack, true);
        atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}

static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
                                  struct mptcp_subflow_context *subflow,
                                  const struct mptcp_options_received *mp_opt)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        mptcp_data_lock(sk);
        if (mp_opt) {
                /* Options are available only in the non-fallback cases;
                 * avoid updating rx path fields otherwise
                 */
                WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
                WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
                subflow_set_remote_key(msk, subflow, mp_opt);
        }

        if (!sock_owned_by_user(sk)) {
                __mptcp_sync_state(sk, ssk->sk_state);
        } else {
                msk->pending_state = ssk->sk_state;
                __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
        }
        mptcp_data_unlock(sk);
}
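
/* Note: this is the locking corner case named in the patch subject: the
 * fields touched above (snd_una, wnd_end, the remote key and ack_seq) are
 * consumed by the rx path and the worker, so they are initialized under the
 * msk data lock; when the msk socket is owned by user context, only the
 * pending state is recorded and the sync is deferred to the release
 * callback via MPTCP_SYNC_STATE.
 */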

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;
        struct mptcp_sock *msk;

        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

        /* be sure no special action on any packet other than syn-ack */
        if (subflow->conn_finished)
                return;

        msk = mptcp_sk(parent);
        subflow->rel_write_seq = 1;
        subflow->conn_finished = 1;
        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
        pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

        mptcp_get_options(skb, &mp_opt);
        if (subflow->request_mptcp) {
                if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
                        MPTCP_INC_STATS(sock_net(sk),
                                        MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
                        mptcp_do_fallback(sk);
                        pr_fallback(msk);
                        goto fallback;
                }

                if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
                        WRITE_ONCE(msk->csum_enabled, true);
                if (mp_opt.deny_join_id0)
                        WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
                subflow->mp_capable = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
                mptcp_finish_connect(sk);
                mptcp_propagate_state(parent, sk, subflow, &mp_opt);
        } else if (subflow->request_join) {
                u8 hmac[SHA256_DIGEST_SIZE];

                if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }

                subflow->backup = mp_opt.backup;
                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
                subflow->remote_id = mp_opt.join_id;
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
                         subflow, subflow->thmac, subflow->remote_nonce,
                         subflow->backup);

                if (!subflow_thmac_valid(subflow)) {
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }

                if (!mptcp_finish_join(sk))
                        goto do_reset;

                subflow_generate_hmac(subflow->local_key, subflow->remote_key,
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
                                      hmac);
                memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

                subflow->mp_join = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

                if (subflow_use_different_dport(msk, sk)) {
                        pr_debug("synack inet_dport=%d %d",
                                 ntohs(inet_sk(sk)->inet_dport),
                                 ntohs(inet_sk(parent)->inet_dport));
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
                }
        } else if (mptcp_check_fallback(sk)) {
fallback:
                mptcp_propagate_state(parent, sk, subflow, NULL);
        }
        return;

do_reset:
        subflow->reset_transient = 0;
        mptcp_subflow_reset(sk);
}
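
/* Note on subflow_finish_connect(): three outcomes are possible for the
 * SYN/ACK of an active subflow: MP_CAPABLE echoed (the MPC handshake
 * completes and the state is propagated to the msk), MP_JOIN echoed (the
 * truncated HMAC is verified and the join finalized), or no MPTCP option
 * at all (fallback to plain TCP for MPC, reset for MP_JOIN).
 */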

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
        subflow->local_id = local_id;
        subflow->local_id_valid = 1;
}

static int subflow_chk_local_id(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        int err;

        if (likely(subflow->local_id_valid))
                return 0;

        err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
        if (err < 0)
                return err;

        subflow_set_local_id(subflow, err);
        return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
        int err = subflow_chk_local_id(sk);

        if (unlikely(err < 0))
                return err;

        return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
        int err = subflow_chk_local_id(sk);

        if (unlikely(err < 0))
                return err;

        return inet6_sk_rebuild_header(sk);
}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        /* Never answer to SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
                                &subflow_request_sock_ipv4_ops,
                                sk, skb);
drop:
        tcp_listendrop(sk);
        return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
        subflow_req_destructor(req);
        tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        if (skb->protocol == htons(ETH_P_IP))
                return subflow_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
                __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
                return 0;
        }

        return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
        subflow_req_destructor(req);
        tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
                                               struct sock *sk_listener,
                                               bool attach_listener)
{
        if (ops->family == AF_INET)
                ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (ops->family == AF_INET6)
                ops = &mptcp_subflow_v6_request_sock_ops;
#endif

        return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
                               const struct mptcp_options_received *mp_opt)
{
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;

        subflow_req = mptcp_subflow_rsk(req);
        msk = subflow_req->msk;
        if (!msk)
                return false;

        subflow_generate_hmac(msk->remote_key, msk->local_key,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);

        return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}
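
/* Note: crypto_memneq() is used instead of memcmp() so the comparison of
 * the full-length HMAC from the third ACK runs in constant time and leaks
 * no timing information about the expected digest.
 */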

static void subflow_ulp_fallback(struct sock *sk,
                                 struct mptcp_subflow_context *old_ctx)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        mptcp_subflow_tcp_fallback(sk, old_ctx);
        icsk->icsk_ulp_ops = NULL;
        rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
        tcp_sk(sk)->is_mptcp = 0;

        mptcp_subflow_ops_undo_override(sk);
}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
                return;

        list_del(&mptcp_subflow_ctx(ssk)->node);
        if (inet_csk(ssk)->icsk_ulp_ops) {
                subflow_ulp_fallback(ssk, ctx);
                if (ctx->conn)
                        sock_put(ctx->conn);
        }

        kfree_rcu(ctx, rcu);
}

void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
                                       struct mptcp_subflow_context *subflow,
                                       const struct mptcp_options_received *mp_opt)
{
        subflow_set_remote_key(msk, subflow, mp_opt);
        subflow->fully_established = 1;
        WRITE_ONCE(msk->fully_established, true);

        if (subflow->is_mptfo)
                __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst,
                                          struct request_sock *req_unhash,
                                          bool *own_req)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
        bool fallback, fallback_is_fatal;
        struct mptcp_sock *owner;
        struct sock *child;

        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

        /* After child creation we must look for MPC even when options
         * are not parsed
         */
        mp_opt.suboptions = 0;

        /* hopefully temporary handling for MP_JOIN+syncookie */
        subflow_req = mptcp_subflow_rsk(req);
        fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
        fallback = !tcp_rsk(req)->is_mptcp;
        if (fallback)
                goto create_child;

        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
                /* we can receive and accept an in-window, out-of-order pkt,
                 * which may not carry the MP_CAPABLE opt even on mptcp enabled
                 * paths: always try to extract the peer key, and fall back
                 * for packets missing it.
                 * Even OoO DSS packets coming legitimately after dropped or
                 * reordered MPC will cause fallback, but we don't have other
                 * options.
                 */
                mptcp_get_options(skb, &mp_opt);
                if (!(mp_opt.suboptions &
                      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
                        fallback = true;

        } else if (subflow_req->mp_join) {
                mptcp_get_options(skb, &mp_opt);
                if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
                    !subflow_hmac_valid(req, &mp_opt) ||
                    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        fallback = true;
                }
        }

create_child:
        child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                     req_unhash, own_req);

        if (child && *own_req) {
                struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

                tcp_rsk(req)->drop_req = false;

                /* we need to fall back on ctx allocation failure and on the
                 * pre-req checks above. In the latter scenario we additionally
                 * need to reset the context to non MPTCP status.
                 */
                if (!ctx || fallback) {
                        if (fallback_is_fatal) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                                goto dispose_child;
                        }
                        goto fallback;
                }

                /* ssk inherits options of listener sk */
                ctx->setsockopt_seq = listener->setsockopt_seq;

                if (ctx->mp_capable) {
                        ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
                        if (!ctx->conn)
                                goto fallback;

                        ctx->subflow_id = 1;
                        owner = mptcp_sk(ctx->conn);
                        mptcp_pm_new_connection(owner, child, 1);

                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
                        if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
                                mptcp_pm_fully_established(owner, child);
                                ctx->pm_notified = 1;
                        }
                } else if (ctx->mp_join) {
                        owner = subflow_req->msk;
                        if (!owner) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
                                goto dispose_child;
                        }

                        /* move the msk reference ownership to the subflow */
                        subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;

                        if (subflow_use_different_sport(owner, sk)) {
                                pr_debug("ack inet_sport=%d %d",
                                         ntohs(inet_sk(sk)->inet_sport),
                                         ntohs(inet_sk((struct sock *)owner)->inet_sport));
                                if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
                                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
                                        goto dispose_child;
                                }
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
                        }

                        if (!mptcp_finish_join(child))
                                goto dispose_child;

                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                        tcp_rsk(req)->drop_req = true;
                }
        }

        /* check for expected invariant - should never trigger, just to help
         * catch earlier subtle bugs
         */
        WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;

dispose_child:
        mptcp_subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);
        req->rsk_ops->send_reset(sk, skb);

        /* The last child reference will be released by the caller */
        return child;

fallback:
        mptcp_subflow_drop_ctx(child);
        return child;
}
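
/* Note: in the fallback path above the child socket survives as a plain
 * TCP socket - only its MPTCP context is dropped. In the dispose_child
 * path the child is instead terminated, but the last reference is still
 * released by the caller, hence the non-NULL return in both cases.
 */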

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {
        MAPPING_OK,
        MAPPING_INVALID,
        MAPPING_EMPTY,
        MAPPING_DATA_FIN,
        MAPPING_DUMMY,
        MAPPING_BAD_CSUM
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
        pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
                 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
                return true;

        return skb->len - skb_consumed <= subflow->map_data_len -
                                          mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        if (unlikely(before(ssn, subflow->map_subflow_seq))) {
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
                dbg_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping only covers past subflow data, invalid */
                dbg_bad_map(subflow, ssn);
                return false;
        }
        return true;
}
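
/* Note: combining both checks above, a mapping is usable iff:
 *
 *   map_subflow_seq <= ssn < map_subflow_seq + map_data_len
 *
 * e.g. a map with map_subflow_seq=1000 and map_data_len=100 covers the
 * subflow sequence range [1000, 1100): ssn 999 hits the first branch,
 * ssn 1100 the second one.
 */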

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
                                              bool csum_reqd)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 offset, seq, delta;
        __sum16 csum;
        int len;

        if (!csum_reqd)
                return MAPPING_OK;

        /* mapping already validated on previous traversal */
        if (subflow->map_csum_len == subflow->map_data_len)
                return MAPPING_OK;

        /* traverse the receive queue, ensuring it contains a full
         * DSS mapping and accumulating the related csum.
         * Preserve the accumulated csum across multiple calls, to compute
         * the csum only once
         */
        delta = subflow->map_data_len - subflow->map_csum_len;
        for (;;) {
                seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
                offset = seq - TCP_SKB_CB(skb)->seq;

                /* if the current skb has not been accounted yet, csum its contents
                 * up to the amount covered by the current DSS
                 */
                if (offset < skb->len) {
                        __wsum csum;

                        len = min(skb->len - offset, delta);
                        csum = skb_checksum(skb, offset, len, 0);
                        subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
                                                                subflow->map_csum_len);

                        delta -= len;
                        subflow->map_csum_len += len;
                }
                if (delta == 0)
                        break;

                if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
                        /* if this subflow is closed, the partial mapping
                         * will never be completed; flush the pending skbs, so
                         * that subflow_sched_work_if_closed() can kick in
                         */
                        if (unlikely(ssk->sk_state == TCP_CLOSE))
                                while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                        sk_eat_skb(ssk, skb);

                        /* not enough data to validate the csum */
                        return MAPPING_EMPTY;
                }

                /* the DSS mapping for next skbs will be validated later,
                 * when a get_mapping_status call processes such skbs
                 */
                skb = skb->next;
        }

        /* note that 'map_data_len' accounts only for the carried data and
         * does not include the possible seq increment due to the DATA_FIN,
         * while the pseudo-header requires the original DSS data len,
         * including it
         */
        csum = __mptcp_make_csum(subflow->map_seq,
                                 subflow->map_subflow_seq,
                                 subflow->map_data_len + subflow->map_data_fin,
                                 subflow->map_data_csum);
        if (unlikely(csum)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
                return MAPPING_BAD_CSUM;
        }

        subflow->valid_csum_seen = 1;
        return MAPPING_OK;
}
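
/* Note: map_csum_len tracks how much of the mapping has been summed so far,
 * so the (potentially multi-skb) checksum is accumulated only once across
 * calls; the running length is also passed to csum_block_add() as the block
 * offset, keeping the one's complement folding correct when a chunk starts
 * at an odd offset.
 */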

static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool csum_reqd = READ_ONCE(msk->csum_enabled);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
        u64 map_seq;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
                return MAPPING_EMPTY;

        if (mptcp_check_fallback(ssk))
                return MAPPING_DUMMY;

        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
                if (!subflow->map_valid && !skb->len) {
                        /* the TCP stack delivers 0-len FIN pkts to the receive
                         * queue; those are the only 0-len pkts ever expected
                         * here, and we can admit no mapping only for 0-len pkts
                         */
                        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                                WARN_ONCE(1, "0len seq %d:%d flags %x",
                                          TCP_SKB_CB(skb)->seq,
                                          TCP_SKB_CB(skb)->end_seq,
                                          TCP_SKB_CB(skb)->tcp_flags);
                        sk_eat_skb(ssk, skb);
                        return MAPPING_EMPTY;
                }

                if (!subflow->map_valid)
                        return MAPPING_INVALID;

                goto validate_seq;
        }

        trace_get_mapping_status(mpext);

        data_len = mpext->data_len;
        if (data_len == 0) {
                pr_debug("infinite mapping received");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                subflow->map_data_len = 0;
                return MAPPING_INVALID;
        }

        if (mpext->data_fin == 1) {
                if (data_len == 1) {
                        bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
                                                                 mpext->dsn64);
                        pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
                        if (subflow->map_valid) {
                                /* A DATA_FIN might arrive in a DSS
                                 * option before the previous mapping
                                 * has been fully consumed. Continue
                                 * handling the existing mapping.
                                 */
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
                                if (updated)
                                        mptcp_schedule_work((struct sock *)msk);

                                return MAPPING_DATA_FIN;
                        }
                } else {
                        u64 data_fin_seq = mpext->data_seq + data_len - 1;

                        /* If mpext->data_seq is a 32-bit value, data_fin_seq
                         * must also be limited to 32 bits.
                         */
                        if (!mpext->dsn64)
                                data_fin_seq &= GENMASK_ULL(31, 0);

                        mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
                        pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
                                 data_fin_seq, mpext->dsn64);
                }

                /* Adjust for DATA_FIN using 1 byte of sequence space */
                data_len--;
        }

        map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
        WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

        if (subflow->map_valid) {
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
                    subflow->map_data_len == data_len &&
                    subflow->map_csum_reqd == mpext->csum_reqd) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
                        goto validate_csum;
                }

                /* If this skb's data is fully covered by the current mapping,
                 * the new map would need caching, which is not supported
                 */
                if (skb_is_fully_mapped(ssk, skb)) {
                        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                        return MAPPING_INVALID;
                }

                /* will validate the next map after consuming the current one */
                goto validate_csum;
        }

        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
        subflow->map_data_fin = mpext->data_fin;
        subflow->mpc_map = mpext->mpc_map;
        subflow->map_csum_reqd = mpext->csum_reqd;
        subflow->map_csum_len = 0;
        subflow->map_data_csum = csum_unfold(mpext->csum);

        /* cf. RFC 8684 Section 3.3.0 */
        if (unlikely(subflow->map_csum_reqd != csum_reqd))
                return MAPPING_INVALID;

        pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
                 subflow->map_seq, subflow->map_subflow_seq,
                 subflow->map_data_len, subflow->map_csum_reqd,
                 subflow->map_data_csum);

validate_seq:
        /* we revalidate a valid mapping on each new skb, because we must
         * ensure the current skb is completely covered by the available
         * mapping
         */
        if (!validate_mapping(ssk, skb)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
                return MAPPING_INVALID;
        }

        skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
        return validate_data_csum(ssk, skb, csum_reqd);
}
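
/* Note on get_mapping_status() return values: MAPPING_OK lets the caller
 * consume data, MAPPING_EMPTY means more data is needed, MAPPING_DATA_FIN
 * signals a standalone DATA_FIN, while MAPPING_INVALID, MAPPING_DUMMY and
 * MAPPING_BAD_CSUM are handled by the fallback/reset logic in
 * subflow_check_data_avail() below.
 */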

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
                                       u64 limit)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
        u32 incr;

        incr = limit >= skb->len ? skb->len + fin : limit;

        pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
                 subflow->map_subflow_seq);
        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
        tcp_sk(ssk)->copied_seq += incr;
        if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
                sk_eat_skb(ssk, skb);
        if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
                subflow->map_valid = 0;
}
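
/* Note: 'limit' above is the amount of data already acked at the MPTCP
 * level (old_ack - ack_seq in the caller). For example, with a 100-byte
 * skb and limit=150 the whole skb (plus the FIN, if any) is eaten; with
 * limit=60 only copied_seq advances and the skb stays queued for the
 * remaining 40 bytes.
 */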

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
        if (likely(ssk->sk_state != TCP_CLOSE))
                return;

        if (skb_queue_empty(&ssk->sk_receive_queue) &&
            !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
                mptcp_schedule_work((struct sock *)msk);
}

static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);

        if (subflow->mp_join)
                return false;
        else if (READ_ONCE(msk->csum_enabled))
                return !subflow->valid_csum_seen;
        else
                return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned long fail_tout;

        /* graceful failure can happen only on the MPC subflow */
        if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
                return;

        /* since the close timeout takes precedence over the fail one,
         * no need to start the latter when the former is already set
         */
        if (sock_flag((struct sock *)msk, SOCK_DEAD))
                return;

        /* we don't need extreme accuracy here; a zero fail_tout is a
         * special value meaning no fail timeout at all
         */
        fail_tout = jiffies + TCP_RTO_MAX;
        if (!fail_tout)
                fail_tout = 1;
        WRITE_ONCE(subflow->fail_tout, fail_tout);
        tcp_send_ack(ssk);

        mptcp_reset_tout_timer(msk, subflow->fail_tout);
}

static bool subflow_check_data_avail(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        enum mapping_status status;
        struct mptcp_sock *msk;
        struct sk_buff *skb;

        if (!skb_peek(&ssk->sk_receive_queue))
                WRITE_ONCE(subflow->data_avail, false);
        if (subflow->data_avail)
                return true;

        msk = mptcp_sk(subflow->conn);
        for (;;) {
                u64 ack_seq;
                u64 old_ack;

                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
                if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
                             status == MAPPING_BAD_CSUM))
                        goto fallback;

                if (status != MAPPING_OK)
                        goto no_data;

                skb = skb_peek(&ssk->sk_receive_queue);
                if (WARN_ON_ONCE(!skb))
                        goto no_data;

                if (unlikely(!READ_ONCE(msk->can_ack)))
                        goto fallback;

                old_ack = READ_ONCE(msk->ack_seq);
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
                if (unlikely(before64(ack_seq, old_ack))) {
                        mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
                        continue;
                }

                WRITE_ONCE(subflow->data_avail, true);
                break;
        }
        return true;

no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;

fallback:
        if (!__mptcp_check_fallback(msk)) {
                /* RFC 8684 section 3.7. */
                if (status == MAPPING_BAD_CSUM &&
                    (subflow->mp_join || subflow->valid_csum_seen)) {
                        subflow->send_mp_fail = 1;

                        if (!READ_ONCE(msk->allow_infinite_fallback)) {
                                subflow->reset_transient = 0;
                                subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
                                goto reset;
                        }
                        mptcp_subflow_fail(msk, ssk);
                        WRITE_ONCE(subflow->data_avail, true);
                        return true;
                }

                if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
                        /* fatal protocol error, close the socket.
                         * subflow_error_report() will introduce the appropriate barriers
                         */
                        subflow->reset_transient = 0;
                        subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
                        WRITE_ONCE(ssk->sk_err, EBADMSG);
                        tcp_set_state(ssk, TCP_CLOSE);
                        while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                sk_eat_skb(ssk, skb);
                        tcp_send_active_reset(ssk, GFP_ATOMIC);
                        WRITE_ONCE(subflow->data_avail, false);
                        return false;
                }

                mptcp_do_fallback(ssk);
        }

        skb = skb_peek(&ssk->sk_receive_queue);
        subflow->map_valid = 1;
        subflow->map_seq = READ_ONCE(msk->ack_seq);
        subflow->map_data_len = skb->len;
        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
        WRITE_ONCE(subflow->data_avail, true);
        return true;
}
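
/* Note on the fallback tail above: once the subflow is in fallback mode,
 * data is carried with a synthetic catch-all mapping built around the
 * queued skb at the current msk-level ack_seq, so the msk can keep
 * consuming the stream as if it were plain TCP.
 */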
1353
1354 bool mptcp_subflow_data_available(struct sock *sk)
1355 {
1356         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1357
1358         /* check if current mapping is still valid */
1359         if (subflow->map_valid &&
1360             mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1361                 subflow->map_valid = 0;
1362                 WRITE_ONCE(subflow->data_avail, false);
1363
1364                 pr_debug("Done with mapping: seq=%u data_len=%u",
1365                          subflow->map_subflow_seq,
1366                          subflow->map_data_len);
1367         }
1368
1369         return subflow_check_data_avail(sk);
1370 }
1371
1372 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1373  * not the ssk one.
1374  *
1375  * In mptcp, rwin is about the mptcp-level connection data.
1376  *
1377  * Data that is still on the ssk rx queue can thus be ignored:
1378  * as far as the mptcp peer is concerned, that data is still in flight.
1379  * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
1380  */
1381 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1382 {
1383         const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1384         const struct sock *sk = subflow->conn;
1385
1386         *space = __mptcp_space(sk);
1387         *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1388 }
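
/* A minimal usage sketch (hypothetical caller, not from this file):
 * deriving the announced window from the values mptcp_space() fills in,
 * so that data still queued on the ssk does not shrink the rwin.
 */
static inline int example_announced_space(const struct sock *ssk)
{
	int space, full_space;

	mptcp_space(ssk, &space, &full_space);
	/* clamp to the msk-level receive buffer */
	return space > 0 ? min(space, full_space) : 0;
}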
1389
1390 static void subflow_error_report(struct sock *ssk)
1391 {
1392         struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1393
1394         /* bail early if this is a no-op, so that we avoid introducing a
1395          * problematic lockdep dependency between the TCP accept queue lock
1396          * and the msk socket spinlock
1397          */
1398         if (!sk->sk_socket)
1399                 return;
1400
1401         mptcp_data_lock(sk);
1402         if (!sock_owned_by_user(sk))
1403                 __mptcp_error_report(sk);
1404         else
1405                 __set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->cb_flags);
1406         mptcp_data_unlock(sk);
1407 }
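
/* Sketch of the deferred half of the pattern above (hypothetical; the
 * real handling lives in the msk release callback): a bit recorded in
 * cb_flags while the msk was owned by user space is replayed once the
 * owner drops the socket lock.
 */
static inline void example_replay_error_report(struct sock *sk)
{
	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags))
		__mptcp_error_report(sk);
}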
1408
1409 static void subflow_data_ready(struct sock *sk)
1410 {
1411         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1412         u16 state = 1 << inet_sk_state_load(sk);
1413         struct sock *parent = subflow->conn;
1414         struct mptcp_sock *msk;
1415
1416         trace_sk_data_ready(sk);
1417
1418         msk = mptcp_sk(parent);
1419         if (state & TCPF_LISTEN) {
1420                 /* MPJ subflows are removed from the accept queue before
1421                  * reaching here; avoid stray wakeups
1422                  */
1423                 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1424                         return;
1425
1426                 parent->sk_data_ready(parent);
1427                 return;
1428         }
1429
1430         WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1431                      !subflow->mp_join && !(state & TCPF_CLOSE));
1432
1433         if (mptcp_subflow_data_available(sk)) {
1434                 mptcp_data_ready(parent, sk);
1435
1436                 /* subflow-level lowat tests are not relevant.
1437                  * Respect the msk-level threshold, possibly mandating an immediate ack
1438                  */
1439                 if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
1440                     (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
1441                         inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
1442         } else if (unlikely(sk->sk_err)) {
1443                 subflow_error_report(sk);
1444         }
1445 }
1446
1447 static void subflow_write_space(struct sock *ssk)
1448 {
1449         struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1450
1451         mptcp_propagate_sndbuf(sk, ssk);
1452         mptcp_write_space(sk);
1453 }
1454
1455 static const struct inet_connection_sock_af_ops *
1456 subflow_default_af_ops(struct sock *sk)
1457 {
1458 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1459         if (sk->sk_family == AF_INET6)
1460                 return &subflow_v6_specific;
1461 #endif
1462         return &subflow_specific;
1463 }
1464
1465 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1466 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1467 {
1468         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1469         struct inet_connection_sock *icsk = inet_csk(sk);
1470         const struct inet_connection_sock_af_ops *target;
1471
1472         target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1473
1474         pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1475                  subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1476
1477         if (likely(icsk->icsk_af_ops == target))
1478                 return;
1479
1480         subflow->icsk_af_ops = icsk->icsk_af_ops;
1481         icsk->icsk_af_ops = target;
1482 }
1483 #endif
1484
1485 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1486                          struct sockaddr_storage *addr,
1487                          unsigned short family)
1488 {
1489         memset(addr, 0, sizeof(*addr));
1490         addr->ss_family = family;
1491         if (addr->ss_family == AF_INET) {
1492                 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1493
1494                 if (info->family == AF_INET)
1495                         in_addr->sin_addr = info->addr;
1496 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1497                 else if (ipv6_addr_v4mapped(&info->addr6))
1498                         in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1499 #endif
1500                 in_addr->sin_port = info->port;
1501         }
1502 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1503         else if (addr->ss_family == AF_INET6) {
1504                 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1505
1506                 if (info->family == AF_INET)
1507                         ipv6_addr_set_v4mapped(info->addr.s_addr,
1508                                                &in6_addr->sin6_addr);
1509                 else
1510                         in6_addr->sin6_addr = info->addr6;
1511                 in6_addr->sin6_port = info->port;
1512         }
1513 #endif
1514 }
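
/* A minimal usage sketch (hypothetical, CONFIG_MPTCP_IPV6 assumed): an
 * IPv4 endpoint converted for an AF_INET6 subflow comes out in the
 * v4-mapped form ::ffff:a.b.c.d, courtesy of ipv6_addr_set_v4mapped()
 * in the AF_INET6 branch above.
 */
static inline void example_v4_on_v6(const struct mptcp_addr_info *info)
{
	struct sockaddr_storage ss;

	mptcp_info2sockaddr(info, &ss, AF_INET6);
	/* for info->family == AF_INET, sin6_addr now holds ::ffff:<v4> */
}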
1515
1516 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1517                             const struct mptcp_addr_info *remote)
1518 {
1519         struct mptcp_sock *msk = mptcp_sk(sk);
1520         struct mptcp_subflow_context *subflow;
1521         struct sockaddr_storage addr;
1522         int remote_id = remote->id;
1523         int local_id = loc->id;
1524         int err = -ENOTCONN;
1525         struct socket *sf;
1526         struct sock *ssk;
1527         u32 remote_token;
1528         int addrlen;
1529         int ifindex;
1530         u8 flags;
1531
1532         if (!mptcp_is_fully_established(sk))
1533                 goto err_out;
1534
1535         err = mptcp_subflow_create_socket(sk, loc->family, &sf);
1536         if (err)
1537                 goto err_out;
1538
1539         ssk = sf->sk;
1540         subflow = mptcp_subflow_ctx(ssk);
1541         do {
1542                 get_random_bytes(&subflow->local_nonce, sizeof(u32));
1543         } while (!subflow->local_nonce);
1544
1545         if (local_id)
1546                 subflow_set_local_id(subflow, local_id);
1547
1548         mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1549                                              &flags, &ifindex);
1550         subflow->remote_key_valid = 1;
1551         subflow->remote_key = msk->remote_key;
1552         subflow->local_key = msk->local_key;
1553         subflow->token = msk->token;
1554         mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1555
1556         addrlen = sizeof(struct sockaddr_in);
1557 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1558         if (addr.ss_family == AF_INET6)
1559                 addrlen = sizeof(struct sockaddr_in6);
1560 #endif
1561         ssk->sk_bound_dev_if = ifindex;
1562         err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1563         if (err)
1564                 goto failed;
1565
1566         mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1567         pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1568                  remote_token, local_id, remote_id);
1569         subflow->remote_token = remote_token;
1570         subflow->remote_id = remote_id;
1571         subflow->request_join = 1;
1572         subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1573         subflow->subflow_id = msk->subflow_id++;
1574         mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1575
1576         sock_hold(ssk);
1577         list_add_tail(&subflow->node, &msk->conn_list);
1578         err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1579         if (err && err != -EINPROGRESS)
1580                 goto failed_unlink;
1581
1582         /* discard the subflow socket */
1583         mptcp_sock_graft(ssk, sk->sk_socket);
1584         iput(SOCK_INODE(sf));
1585         WRITE_ONCE(msk->allow_infinite_fallback, false);
1586         mptcp_stop_tout_timer(sk);
1587         return 0;
1588
1589 failed_unlink:
1590         list_del(&subflow->node);
1591         sock_put(mptcp_subflow_tcp_sock(subflow));
1592
1593 failed:
1594         subflow->disposable = 1;
1595         sock_release(sf);
1596
1597 err_out:
1598         /* we account subflows before creation, and these failures will not
1599          * be caught by sk_state_change()
1600          */
1601         mptcp_pm_close_subflow(msk);
1602         return err;
1603 }
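
/* Sketch of the non-blocking connect convention used above
 * (hypothetical helper, not from this file): with O_NONBLOCK,
 * -EINPROGRESS only means the handshake has been started, so it is not
 * treated as a failure.
 */
static inline int example_start_connect(struct socket *sf,
					struct sockaddr *uaddr, int addrlen)
{
	int err = kernel_connect(sf, uaddr, addrlen, O_NONBLOCK);

	return err == -EINPROGRESS ? 0 : err;
}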
1604
1605 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1606 {
1607 #ifdef CONFIG_SOCK_CGROUP_DATA
1608         struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1609                                 *child_skcd = &child->sk_cgrp_data;
1610
1611         /* only the additional subflows created by kworkers have to be modified */
1612         if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1613             cgroup_id(sock_cgroup_ptr(child_skcd))) {
1614 #ifdef CONFIG_MEMCG
1615                 struct mem_cgroup *memcg = parent->sk_memcg;
1616
1617                 mem_cgroup_sk_free(child);
1618                 if (memcg && css_tryget(&memcg->css))
1619                         child->sk_memcg = memcg;
1620 #endif /* CONFIG_MEMCG */
1621
1622                 cgroup_sk_free(child_skcd);
1623                 *child_skcd = *parent_skcd;
1624                 cgroup_sk_clone(child_skcd);
1625         }
1626 #endif /* CONFIG_SOCK_CGROUP_DATA */
1627 }
1628
1629 static void mptcp_subflow_ops_override(struct sock *ssk)
1630 {
1631 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1632         if (ssk->sk_prot == &tcpv6_prot)
1633                 ssk->sk_prot = &tcpv6_prot_override;
1634         else
1635 #endif
1636                 ssk->sk_prot = &tcp_prot_override;
1637 }
1638
1639 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1640 {
1641 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1642         if (ssk->sk_prot == &tcpv6_prot_override)
1643                 ssk->sk_prot = &tcpv6_prot;
1644         else
1645 #endif
1646                 ssk->sk_prot = &tcp_prot;
1647 }
1648
1649 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
1650                                 struct socket **new_sock)
1651 {
1652         struct mptcp_subflow_context *subflow;
1653         struct net *net = sock_net(sk);
1654         struct socket *sf;
1655         int err;
1656
1657         /* un-accepted server sockets can reach here; on bad configuration,
1658          * bail early to avoid greater trouble later
1659          */
1660         if (unlikely(!sk->sk_socket))
1661                 return -EINVAL;
1662
1663         err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
1664         if (err)
1665                 return err;
1666
1667         lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
1668
1669         err = security_mptcp_add_subflow(sk, sf->sk);
1670         if (err)
1671                 goto err_free;
1672
1673         /* the newly created socket has to be in the same cgroup as its parent */
1674         mptcp_attach_cgroup(sk, sf->sk);
1675
1676         /* kernel sockets do not by default acquire a net ref, but the TCP
1677          * timer needs one.
1678          * Update ns_tracker to the current stack trace and a refcounted tracker.
1679          */
1680         __netns_tracker_free(net, &sf->sk->ns_tracker, false);
1681         sf->sk->sk_net_refcnt = 1;
1682         get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1683         sock_inuse_add(net, 1);
1684         err = tcp_set_ulp(sf->sk, "mptcp");
1685         if (err)
1686                 goto err_free;
1687
1688         mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
1689         release_sock(sf->sk);
1690
1691         /* the newly created socket really belongs to the owning MPTCP master
1692          * socket, even if for additional subflows the allocation is performed
1693          * by a kernel workqueue. Adjust inode references, so that the
1694          * procfs/diag interfaces really show this one belonging to the correct
1695          * user.
1696          */
1697         SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1698         SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1699         SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1700
1701         subflow = mptcp_subflow_ctx(sf->sk);
1702         pr_debug("subflow=%p", subflow);
1703
1704         *new_sock = sf;
1705         sock_hold(sk);
1706         subflow->conn = sk;
1707         mptcp_subflow_ops_override(sf->sk);
1708
1709         return 0;
1710
1711 err_free:
1712         release_sock(sf->sk);
1713         sock_release(sf);
1714         return err;
1715 }
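
/* Hypothetical usage sketch (not from this file): pairing
 * mptcp_subflow_create_socket() with the subflow context lookup,
 * roughly as __mptcp_subflow_connect() does above.
 */
static inline int example_create_subflow(struct sock *msk_sk,
					 unsigned short family,
					 struct socket **sf)
{
	int err = mptcp_subflow_create_socket(msk_sk, family, sf);

	if (err)
		return err;

	/* the new ssk already carries its mptcp subflow context */
	pr_debug("subflow=%p", mptcp_subflow_ctx((*sf)->sk));
	return 0;
}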
1716
1717 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1718                                                         gfp_t priority)
1719 {
1720         struct inet_connection_sock *icsk = inet_csk(sk);
1721         struct mptcp_subflow_context *ctx;
1722
1723         ctx = kzalloc(sizeof(*ctx), priority);
1724         if (!ctx)
1725                 return NULL;
1726
1727         rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1728         INIT_LIST_HEAD(&ctx->node);
1729         INIT_LIST_HEAD(&ctx->delegated_node);
1730
1731         pr_debug("subflow=%p", ctx);
1732
1733         ctx->tcp_sock = sk;
1734
1735         return ctx;
1736 }
1737
1738 static void __subflow_state_change(struct sock *sk)
1739 {
1740         struct socket_wq *wq;
1741
1742         rcu_read_lock();
1743         wq = rcu_dereference(sk->sk_wq);
1744         if (skwq_has_sleeper(wq))
1745                 wake_up_interruptible_all(&wq->wait);
1746         rcu_read_unlock();
1747 }
1748
1749 static bool subflow_is_done(const struct sock *sk)
1750 {
1751         return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1752 }
1753
1754 static void subflow_state_change(struct sock *sk)
1755 {
1756         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1757         struct sock *parent = subflow->conn;
1758         struct mptcp_sock *msk;
1759
1760         __subflow_state_change(sk);
1761
1762         msk = mptcp_sk(parent);
1763         if (subflow_simultaneous_connect(sk)) {
1764                 mptcp_do_fallback(sk);
1765                 pr_fallback(msk);
1766                 subflow->conn_finished = 1;
1767                 mptcp_propagate_state(parent, sk, subflow, NULL);
1768         }
1769
1770         /* as recvmsg() does not acquire the subflow socket for ssk selection,
1771          * a fin packet carrying a DSS can go unnoticed if we don't trigger
1772          * the data-available machinery here.
1773          */
1774         if (mptcp_subflow_data_available(sk))
1775                 mptcp_data_ready(parent, sk);
1776         else if (unlikely(sk->sk_err))
1777                 subflow_error_report(sk);
1778
1779         subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1780
1781         /* when the fallback subflow closes the rx side, trigger a 'dummy'
1782          * ingress data fin, so that the msk state will follow along
1783          */
1784         if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1785             mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1786                 mptcp_schedule_work(parent);
1787 }
1788
1789 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
1790 {
1791         struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
1792         struct request_sock *req, *head, *tail;
1793         struct mptcp_subflow_context *subflow;
1794         struct sock *sk, *ssk;
1795
1796         /* Due to lock dependencies, no relevant lock can be acquired under rskq_lock.
1797          * Splice the req list, so that accept() cannot reach the pending ssk after
1798          * the listener socket is released below.
1799          */
1800         spin_lock_bh(&queue->rskq_lock);
1801         head = queue->rskq_accept_head;
1802         tail = queue->rskq_accept_tail;
1803         queue->rskq_accept_head = NULL;
1804         queue->rskq_accept_tail = NULL;
1805         spin_unlock_bh(&queue->rskq_lock);
1806
1807         if (!head)
1808                 return;
1809
1810         /* can't acquire the msk socket lock under the subflow one,
1811          * or it will cause an ABBA deadlock
1812          */
1813         release_sock(listener_ssk);
1814
1815         for (req = head; req; req = req->dl_next) {
1816                 ssk = req->sk;
1817                 if (!sk_is_mptcp(ssk))
1818                         continue;
1819
1820                 subflow = mptcp_subflow_ctx(ssk);
1821                 if (!subflow || !subflow->conn)
1822                         continue;
1823
1824                 sk = subflow->conn;
1825                 sock_hold(sk);
1826
1827                 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1828                 __mptcp_unaccepted_force_close(sk);
1829                 release_sock(sk);
1830
1831                 /* lockdep will report a false positive ABBA deadlock
1832                  * between cancel_work_sync and the listener socket.
1833                  * The involved locks belong to different sockets WRT
1834                  * the existing AB chain.
1835                  * Using a per-socket key is problematic, as key
1836                  * deregistration requires process context but would
1837                  * have to happen at socket disposal time, in atomic
1838                  * context.
1839                  * Just tell lockdep to consider the listener socket
1840                  * released here.
1841                  */
1842                 mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
1843                 mptcp_cancel_work(sk);
1844                 mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1845
1846                 sock_put(sk);
1847         }
1848
1849         /* we are still under the listener msk socket lock */
1850         lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
1851
1852         /* restore the listener queue, to let the TCP code clean it up */
1853         spin_lock_bh(&queue->rskq_lock);
1854         WARN_ON_ONCE(queue->rskq_accept_head);
1855         queue->rskq_accept_head = head;
1856         queue->rskq_accept_tail = tail;
1857         spin_unlock_bh(&queue->rskq_lock);
1858 }
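
/* Generic sketch of the splice-then-walk pattern used above
 * (hypothetical, simplified to a list_head based queue): detach
 * everything under the lock, process without it, then hand the queue
 * back for the regular cleanup path.
 */
static void example_splice_and_walk(spinlock_t *lock, struct list_head *queue)
{
	struct list_head *pos, *tmp;
	LIST_HEAD(work);

	spin_lock_bh(lock);
	list_splice_init(queue, &work);
	spin_unlock_bh(lock);

	list_for_each_safe(pos, tmp, &work) {
		/* process the entry without holding the lock */
	}

	spin_lock_bh(lock);
	list_splice(&work, queue);
	spin_unlock_bh(lock);
}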
1859
1860 static int subflow_ulp_init(struct sock *sk)
1861 {
1862         struct inet_connection_sock *icsk = inet_csk(sk);
1863         struct mptcp_subflow_context *ctx;
1864         struct tcp_sock *tp = tcp_sk(sk);
1865         int err = 0;
1866
1867         /* disallow attaching ULP to a socket unless it has been
1868          * created with sock_create_kern()
1869          */
1870         if (!sk->sk_kern_sock) {
1871                 err = -EOPNOTSUPP;
1872                 goto out;
1873         }
1874
1875         ctx = subflow_create_ctx(sk, GFP_KERNEL);
1876         if (!ctx) {
1877                 err = -ENOMEM;
1878                 goto out;
1879         }
1880
1881         pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1882
1883         tp->is_mptcp = 1;
1884         ctx->icsk_af_ops = icsk->icsk_af_ops;
1885         icsk->icsk_af_ops = subflow_default_af_ops(sk);
1886         ctx->tcp_state_change = sk->sk_state_change;
1887         ctx->tcp_error_report = sk->sk_error_report;
1888
1889         WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
1890         WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
1891
1892         sk->sk_data_ready = subflow_data_ready;
1893         sk->sk_write_space = subflow_write_space;
1894         sk->sk_state_change = subflow_state_change;
1895         sk->sk_error_report = subflow_error_report;
1896 out:
1897         return err;
1898 }
1899
1900 static void subflow_ulp_release(struct sock *ssk)
1901 {
1902         struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1903         bool release = true;
1904         struct sock *sk;
1905
1906         if (!ctx)
1907                 return;
1908
1909         sk = ctx->conn;
1910         if (sk) {
1911                 /* if the msk has been orphaned while the subflow is
1912                  * still unaccepted, keep the ctx alive; it will be
1913                  * freed by __mptcp_close_ssk()
1914                  */
1915                 release = ctx->disposable || list_empty(&ctx->node);
1916
1917                 /* inet_child_forget() does not call sk_state_change();
1918                  * explicitly trigger the socket close machinery
1919                  */
1920                 if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
1921                                                   &mptcp_sk(sk)->flags))
1922                         mptcp_schedule_work(sk);
1923                 sock_put(sk);
1924         }
1925
1926         mptcp_subflow_ops_undo_override(ssk);
1927         if (release)
1928                 kfree_rcu(ctx, rcu);
1929 }
1930
1931 static void subflow_ulp_clone(const struct request_sock *req,
1932                               struct sock *newsk,
1933                               const gfp_t priority)
1934 {
1935         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1936         struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1937         struct mptcp_subflow_context *new_ctx;
1938
1939         if (!tcp_rsk(req)->is_mptcp ||
1940             (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1941                 subflow_ulp_fallback(newsk, old_ctx);
1942                 return;
1943         }
1944
1945         new_ctx = subflow_create_ctx(newsk, priority);
1946         if (!new_ctx) {
1947                 subflow_ulp_fallback(newsk, old_ctx);
1948                 return;
1949         }
1950
1951         new_ctx->conn_finished = 1;
1952         new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1953         new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1954         new_ctx->tcp_error_report = old_ctx->tcp_error_report;
1955         new_ctx->rel_write_seq = 1;
1956         new_ctx->tcp_sock = newsk;
1957
1958         if (subflow_req->mp_capable) {
1959                 /* see comments in subflow_syn_recv_sock(): the MPTCP connection
1960                  * is fully established only after we receive the remote key
1961                  */
1962                 new_ctx->mp_capable = 1;
1963                 new_ctx->local_key = subflow_req->local_key;
1964                 new_ctx->token = subflow_req->token;
1965                 new_ctx->ssn_offset = subflow_req->ssn_offset;
1966                 new_ctx->idsn = subflow_req->idsn;
1967
1968                 /* this is the first subflow, id is always 0 */
1969                 new_ctx->local_id_valid = 1;
1970         } else if (subflow_req->mp_join) {
1971                 new_ctx->ssn_offset = subflow_req->ssn_offset;
1972                 new_ctx->mp_join = 1;
1973                 new_ctx->fully_established = 1;
1974                 new_ctx->remote_key_valid = 1;
1975                 new_ctx->backup = subflow_req->backup;
1976                 new_ctx->remote_id = subflow_req->remote_id;
1977                 new_ctx->token = subflow_req->token;
1978                 new_ctx->thmac = subflow_req->thmac;
1979
1980                 /* the subflow req id is valid, fetched via subflow_check_req()
1981                  * and subflow_token_join_request()
1982                  */
1983                 subflow_set_local_id(new_ctx, subflow_req->local_id);
1984         }
1985 }
1986
1987 static void tcp_release_cb_override(struct sock *ssk)
1988 {
1989         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1990         long status;
1991
1992         /* process and clear all the pending actions, but leave the subflow in
1993          * the napi queue. To respect locking, only the same CPU that originated
1994          * the action can touch the list. mptcp_napi_poll() will take care of it.
1995          */
1996         status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
1997         if (status)
1998                 mptcp_subflow_process_delegated(ssk, status);
1999
2000         tcp_release_cb(ssk);
2001 }
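
/* Sketch of what the set_mask_bits() call above amounts to
 * (hypothetical open-coded form, not part of this file): atomically
 * fetch and clear the pending delegated-action bits in one step, so a
 * concurrently queued action cannot be lost.
 */
static inline unsigned long example_grab_delegated(unsigned long *status)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*status);
		new = old & ~MPTCP_DELEGATE_ACTIONS_MASK;
	} while (cmpxchg(status, old, new) != old);

	return old & MPTCP_DELEGATE_ACTIONS_MASK;
}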
2002
2003 static int tcp_abort_override(struct sock *ssk, int err)
2004 {
2005         /* closing a listener subflow requires a great deal of care.
2006          * Keep it simple and just prevent such an operation
2007          */
2008         if (inet_sk_state_load(ssk) == TCP_LISTEN)
2009                 return -EINVAL;
2010
2011         return tcp_abort(ssk, err);
2012 }
2013
2014 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
2015         .name           = "mptcp",
2016         .owner          = THIS_MODULE,
2017         .init           = subflow_ulp_init,
2018         .release        = subflow_ulp_release,
2019         .clone          = subflow_ulp_clone,
2020 };
2021
2022 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
2023 {
2024         subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
2025
2026         subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
2027                                               subflow_ops->obj_size, 0,
2028                                               SLAB_ACCOUNT |
2029                                               SLAB_TYPESAFE_BY_RCU,
2030                                               NULL);
2031         if (!subflow_ops->slab)
2032                 return -ENOMEM;
2033
2034         return 0;
2035 }
2036
2037 void __init mptcp_subflow_init(void)
2038 {
2039         mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
2040         mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
2041         mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
2042
2043         if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
2044                 panic("MPTCP: failed to init subflow v4 request sock ops\n");
2045
2046         subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
2047         subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
2048         subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
2049
2050         subflow_specific = ipv4_specific;
2051         subflow_specific.conn_request = subflow_v4_conn_request;
2052         subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
2053         subflow_specific.sk_rx_dst_set = subflow_finish_connect;
2054         subflow_specific.rebuild_header = subflow_rebuild_header;
2055
2056         tcp_prot_override = tcp_prot;
2057         tcp_prot_override.release_cb = tcp_release_cb_override;
2058         tcp_prot_override.diag_destroy = tcp_abort_override;
2059
2060 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2061         /* In struct mptcp_subflow_request_sock, we assume the TCP request sock
2062          * structures for v4 and v6 have the same size. This should not change
2063          * in the future, but better make sure we are warned if it is no longer
2064          * the case.
2065          */
2066         BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
2067
2068         mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
2069         mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
2070         mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
2071
2072         if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
2073                 panic("MPTCP: failed to init subflow v6 request sock ops\n");
2074
2075         subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
2076         subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
2077         subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
2078
2079         subflow_v6_specific = ipv6_specific;
2080         subflow_v6_specific.conn_request = subflow_v6_conn_request;
2081         subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
2082         subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
2083         subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
2084
2085         subflow_v6m_specific = subflow_v6_specific;
2086         subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
2087         subflow_v6m_specific.send_check = ipv4_specific.send_check;
2088         subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
2089         subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
2090         subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
2091
2092         tcpv6_prot_override = tcpv6_prot;
2093         tcpv6_prot_override.release_cb = tcp_release_cb_override;
2094         tcpv6_prot_override.diag_destroy = tcp_abort_override;
2095 #endif
2096
2097         mptcp_diag_subflow_init(&subflow_ulp_ops);
2098
2099         if (tcp_register_ulp(&subflow_ulp_ops) != 0)
2100                 panic("MPTCP: failed to register subflows to ULP\n");
2101 }
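
/* User-space demonstration (hypothetical standalone test, not part of
 * this file; assumes a libc exposing TCP_ULP): the "mptcp" ULP
 * registered above is reserved for kernel sockets, so an explicit
 * setsockopt(TCP_ULP) attempt is expected to fail with EOPNOTSUPP per
 * subflow_ulp_init(). Kept as a comment so the kernel unit stays
 * self-contained:
 *
 *	#include <errno.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "mptcp",
 *			       sizeof("mptcp")))
 *			printf("TCP_ULP mptcp: %s\n", strerror(errno));
 *		close(fd);
 *		return 0;
 *	}
 */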