// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

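/* TCP FIN and RST take the connection out of the fast path: tear the
 * flow down so the shutdown handshake goes through the classic path.
 */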
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

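/* Rewrite the IPv4 source (SNAT) or destination (DNAT) address from the
 * opposite direction's tuple, fixing the IP and layer 4 checksums.
 */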
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

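/* Apply the port and address translations recorded in the flow entry. */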
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

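/* Record up to two levels of VLAN/PPPoE encapsulation in the lookup key. */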
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_network_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

struct nf_flowtable_ctx {
	const struct net_device	*in;
	u32			offset;
	u32			hdrsize;
};

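/* Fill the lookup tuple from an IPv4 packet. Only TCP, UDP and GRE
 * version 0 qualify for the fast path; fragments and packets with IP
 * options fall back to the classic path.
 */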
static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += ctx->offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;
	tuple->iifidx = ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;
	__be16 inner_proto;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb, &inner_proto) &&
		    inner_proto == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

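/* Strip the encapsulation headers recorded in the tuple before the
 * packet is pushed out on the fast path.
 */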
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = __nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

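/* Direct xmit: the Ethernet addresses are cached in the tuple, so the
 * header can be rebuilt without a neighbour lookup.
 */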
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx,
		       struct nf_flowtable *flow_table, struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

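/* Translate and prepare the packet for transmission. Returns 1 when the
 * packet is ready to be sent, 0 to pass it up to the classic path and a
 * negative value to drop it.
 */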
static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
				   struct nf_flowtable *flow_table,
				   struct flow_offload_tuple_rhash *tuplehash,
				   struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct iphdr *iph;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4) + ctx->offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow, false);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= ctx->offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

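/* Netfilter ingress hook: look up the IPv4 flowtable and, on a hit,
 * forward the packet without traversing the classic forwarding path.
 */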
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in	= state->in,
	};
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	__be32 nexthop;
	int ret;

	tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

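/* Fill the lookup tuple from an IPv6 packet; the IPv6 counterpart of
 * nf_flow_tuple_ip().
 */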
static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + ctx->offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;
	tuple->iifidx = ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

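/* IPv6 counterpart of nf_flow_offload_forward(), with the same return
 * convention.
 */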
static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
					struct nf_flowtable *flow_table,
					struct flow_offload_tuple_rhash *tuplehash,
					struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct ipv6hdr *ip6h;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
	thoff = sizeof(*ip6h) + ctx->offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow, false);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_ipv6_lookup(struct nf_flowtable_ctx *ctx,
			    struct nf_flowtable *flow_table,
			    struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

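/* Netfilter ingress hook for the IPv6 flowtable fast path. */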
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in	= state->in,
	};
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rt6_info *rt;
	int ret;

	tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);