// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].id = nhge->nh->id;
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

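/* Sketch (editor's note, not part of the original file): a driver
 * subscribes to these events with register_nexthop_notifier(); the
 * handler name below is hypothetical:
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event_type, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event_type) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			// program info->nh / info->nh_grp into HW
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 */
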
static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))

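/* Illustration (editor's note, not part of the original file): the
 * RTNL-side write sequence described above looks roughly like:
 *
 *	nh_res_table_cancel_upkeep(res_table);	// sync-cancel the DW
 *	...modify buckets / group entries...	// now exclusive access
 *	nh_res_table_upkeep(res_table, ...);	// reschedules DW if needed
 */
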
static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	struct nh_group *nhg;
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

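/* Worked example (editor's note, not part of the original file): with
 * NH_DEV_HASHBITS == 8, an ifindex of 0x00012345 folds as
 * 0x45 ^ 0x23 ^ 0x01 = 0x67, i.e. the three low-order bytes are
 * XOR-folded into one of the 256 device hash chains.
 */
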
static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

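/* Usage note (editor's note, not part of the original file): callers must
 * hold RCU or RTNL, e.g.:
 *
 *	rcu_read_lock();
 *	nh = nexthop_find_by_id(net, id);
 *	if (nh)
 *		... use nh; no reference is taken ...
 *	rcu_read_unlock();
 */
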
/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = this_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
}

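/* Note (editor's note, not part of the original file): these counters are
 * bumped on the packet path from nexthop_select_path(), so they use the
 * lockless per-CPU u64_stats pattern; the matching reader side with
 * fetch_begin/fetch_retry follows below.
 */
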
static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS, packets))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	int i;

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i]))
			goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

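/* Wire-format note (editor's note, not part of the original file):
 * struct nexthop_grp carries weight - 1, so a netlink weight of 0 means
 * an effective weight of 1. nh_check_attr_group() rejects values above
 * 254, giving effective weights 1..255; nexthop_create_group() stores
 * entry[i].weight + 1 and the dump above undoes that with weight - 1.
 */
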
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
						  /* NHA_GATEWAY */
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

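/* Note (editor's note, not part of the original file): a nexthop counts
 * as "good" unless its neighbour entry is known bad; e.g. a gateway in
 * NUD_REACHABLE or NUD_STALE passes (both are in NUD_VALID) while
 * NUD_FAILED does not. With no neighbour entry at all, the state defaults
 * to NUD_REACHABLE above, so the nexthop is still usable.
 */
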
static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* nexthops always check if the neighbour is good and do
		 * not rely on a sysctl for this behavior
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);

	return nhge0->nh;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

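/* Note (editor's note, not part of the original file): for hash-threshold
 * groups the flow hash is compared against per-entry cumulative upper
 * bounds (computed by nh_hthr_group_rebalance() below), while resilient
 * groups simply index hash % num_nh_buckets into a bucket table, which
 * keeps most flows pinned when group membership changes.
 */
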
int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);

	return false;
}

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL	(HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	int prev_upper_bound = 0;
	int total = 0;
	int w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
						total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}

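/* Worked example (editor's note, not part of the original file): with 32
 * buckets and weights 3 and 1, the running weight w is 3 then 4, so the
 * cumulative bounds are DIV_ROUND_CLOSEST(32 * 3, 4) = 24 and
 * DIV_ROUND_CLOSEST(32 * 4, 4) = 32: the first nexthop wants 24 buckets
 * and the second wants the remaining 8.
 */
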
/* Migrate buckets in res_table so that they reference NHGE's from NHG with
 * the right NH ID. Set those buckets that do not have a corresponding NHGE
 * entry in NHG as not occupied.
 */
static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
					 struct nh_group *nhg)
{
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
		bool found = false;
		int j;

		for (j = 0; j < nhg->num_nh; j++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[j];

			if (nhge->nh->id == id) {
				nh_res_bucket_set_nh(bucket, nhge);
				found = true;
				break;
			}
		}

		if (!found)
			nh_res_bucket_unset_nh(bucket);
	}
}

static void replace_nexthop_grp_res(struct nh_group *oldg,
				    struct nh_group *newg)
{
	/* For NH group replacement, the new NHG might only have a stub
	 * hash table with 0 buckets, because the number of buckets was not
	 * specified. For NH removal, oldg and newg both reference the same
	 * res_table. So in any case, in the following, we want to work
	 * with oldg->res_table.
	 */
	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);

	nh_res_table_cancel_upkeep(old_res_table);
	nh_res_table_migrate_buckets(old_res_table, newg);
	nh_res_group_rebalance(newg, old_res_table);
	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
		old_res_table->unbalanced_since = prev_unbalanced_since;
	nh_res_table_upkeep(old_res_table, true, false);
}

static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
	u64 total = 0;
	u64 w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->hthr.upper_bound, upper_bound);
	}
}

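/* Worked example (editor's note, not part of the original file): weights
 * 1 and 3 give total = 4, so the cumulative bounds are
 * (1 << 31) * 1/4 - 1 = 0x1fffffff and (1 << 31) * 4/4 - 1 = 0x7fffffff.
 * A 31-bit flow hash of 0x30000000 exceeds the first bound, so the second
 * entry (weight 3) is selected; it owns 3/4 of the hash space.
 */
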
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct netlink_ext_ack extack;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j, err;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = false;
	newg->is_multipath = nhg->is_multipath;
	newg->hash_threshold = nhg->hash_threshold;
	newg->resilient = nhg->resilient;
	newg->fdb_nh = nhg->fdb_nh;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		struct nh_info *nhi;

		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			newg->has_v4 = true;

		list_del(&nhges[i].nh_list);
		new_nhges[j].stats = nhges[i].stats;
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	if (newg->hash_threshold)
		nh_hthr_group_rebalance(newg);
	else if (newg->resilient)
		replace_nexthop_grp_res(nhg, newg);

	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	free_percpu(nhge->stats);
	nexthop_put(nhge->nh);

	/* Removal of a NH from a resilient group is notified through
	 * bucket notifications.
	 */
	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
					     &extack);
		if (err)
			pr_err("%s\n", extack._msg);
	}

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	struct nh_res_table *res_table;
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}

	if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		nh_res_table_cancel_upkeep(res_table);
	}
}

/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);

	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
			      struct nexthop *replaced_nh)
{
	struct fib6_info *f6i;
	struct nh_group *nhg;
	int i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);

	/* if an IPv6 group was replaced, we have to release all old
	 * dsts to make sure all refcounts are released
	 */
	if (!replaced_nh->is_group)
		return;

	nhg = rtnl_dereference(replaced_nh->nh_grp);
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);

		if (nhi->family == AF_INET6)
			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
	}
}

static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->hash_threshold != oldg->hash_threshold) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	/* Make sure concurrent readers are not using 'oldg' anymore. */
	synchronize_net();

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static void nh_group_v4_update(struct nh_group *nhg)
{
	struct nh_grp_entry *nhges;
	bool has_v4 = false;
	int i;

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			has_v4 = true;
	}
	nhg->has_v4 = has_v4;
}

static int replace_nexthop_single_notify_res(struct net *net,
					     struct nh_res_table *res_table,
					     struct nexthop *old,
					     struct nh_info *oldi,
					     struct nh_info *newi,
					     struct netlink_ext_ack *extack)
{
	u32 nhg_id = res_table->nhg_id;
	int err;
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old) {
			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
								  i, true,
								  oldi, newi,
								  extack);
			if (err)
				goto err_notify;
		}
	}

	return 0;

err_notify:
	while (i-- > 0) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old)
			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
							    true, newi, oldi,
							    extack);
	}
	return err;
}

static int replace_nexthop_single_notify(struct net *net,
					 struct nexthop *group_nh,
					 struct nexthop *old,
					 struct nh_info *oldi,
					 struct nh_info *newi,
					 struct netlink_ext_ack *extack)
{
	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
	struct nh_res_table *res_table;

	if (nhg->hash_threshold) {
		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
					      group_nh, extack);
	} else if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		return replace_nexthop_single_notify_res(net, res_table,
							 old, oldi, newi,
							 extack);
	}

	return -EINVAL;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	u8 old_protocol, old_nh_flags;
	struct nh_info *oldi, *newi;
	struct nh_grp_entry *nhge;
	int err;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
	if (err)
		return err;

	/* Hardware flags were set on 'old' as 'new' is not in the red-black
	 * tree. Therefore, inherit the flags from 'old' to 'new'.
	 */
	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old_protocol = old->protocol;
	old_nh_flags = old->nh_flags;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	/* Send a replace notification for all the groups using the nexthop. */
	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
						    extack);
		if (err)
			goto err_notify;
	}

	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
	 * update IPv4 indication in all the groups using the nexthop.
	 */
	if (oldi->family == AF_INET && newi->family == AF_INET6) {
		list_for_each_entry(nhge, &old->grp_list, nh_list) {
			struct nexthop *nhp = nhge->nh_parent;
			struct nh_group *nhg;

			nhg = rtnl_dereference(nhp->nh_grp);
			nh_group_v4_update(nhg);
		}
	}

	return 0;

err_notify:
	rcu_assign_pointer(new->nh_info, newi);
	rcu_assign_pointer(old->nh_info, oldi);
	old->nh_flags = old_nh_flags;
	old->protocol = old_protocol;
	oldi->nh_parent = old;
	newi->nh_parent = new;
	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
	}
	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
	return err;
}

static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, const struct nh_config *cfg,
			   struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, cfg, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old, new);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}

2411 /* called with rtnl_lock held */
2412 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2413 struct nh_config *cfg, struct netlink_ext_ack *extack)
2415 struct rb_node **pp, *parent = NULL, *next;
2416 struct rb_root *root = &net->nexthop.rb_root;
2417 bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2418 bool create = !!(cfg->nlflags & NLM_F_CREATE);
2419 u32 new_id = new_nh->id;
2420 int replace_notify = 0;
2423 pp = &root->rb_node;
2433 nh = rb_entry(parent, struct nexthop, rb_node);
2434 if (new_id < nh->id) {
2435 pp = &next->rb_left;
2436 } else if (new_id > nh->id) {
2437 pp = &next->rb_right;
2438 } else if (replace) {
2439 rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2441 new_nh = nh; /* send notification with old nh */
2446 /* id already exists and not a replace */
2451 if (replace && !create) {
2452 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2457 if (new_nh->is_group) {
2458 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2459 struct nh_res_table *res_table;
2461 if (nhg->resilient) {
2462 res_table = rtnl_dereference(nhg->res_table);
2464 /* Not passing the number of buckets is OK when
2465 * replacing, but not when creating a new group.
2466 */
2467 if (!cfg->nh_grp_res_has_num_buckets) {
2468 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2473 nh_res_group_rebalance(nhg, res_table);
2475 /* Do not send bucket notifications, we do full
2476 * notification below.
2477 */
2478 nh_res_table_upkeep(res_table, false, false);
2482 rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2483 rb_insert_color(&new_nh->rb_node, root);
2485 /* The initial insertion is a full notification for hash-threshold as
2486 * well as resilient groups.
2487 */
2488 rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2490 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2494 nh_base_seq_inc(net);
2495 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2496 if (replace_notify &&
2497 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2498 nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2505 /* remove all nexthops tied to a device being deleted */
2506 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2508 unsigned int hash = nh_dev_hashfn(dev->ifindex);
2509 struct net *net = dev_net(dev);
2510 struct hlist_head *head = &net->nexthop.devhash[hash];
2511 struct hlist_node *n;
2512 struct nh_info *nhi;
2514 hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2515 if (nhi->fib_nhc.nhc_dev != dev)
2518 if (nhi->reject_nh &&
2519 (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2522 remove_nexthop(net, nhi->nh_parent, NULL);
2526 /* rtnl; called when net namespace is deleted */
2527 static void flush_all_nexthops(struct net *net)
2529 struct rb_root *root = &net->nexthop.rb_root;
2530 struct rb_node *node;
2533 while ((node = rb_first(root))) {
2534 nh = rb_entry(node, struct nexthop, rb_node);
2535 remove_nexthop(net, nh, NULL);
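/* Build a group nexthop from the NHA_GROUP attribute, an array of
 * {id, weight} entries referencing existing nexthops. Illustrative
 * iproute2 usage (member ids 1 and 2 assumed to exist):
 *
 *   ip nexthop add id 100 group 1,3/2,1
 *
 * yields a hash-threshold group weighted 3:1 between the members.
 */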
2540 static struct nexthop *nexthop_create_group(struct net *net,
2541 struct nh_config *cfg)
2543 struct nlattr *grps_attr = cfg->nh_grp;
2544 struct nexthop_grp *entry = nla_data(grps_attr);
2545 u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2546 struct nh_group *nhg;
2551 if (WARN_ON(!num_nh))
2552 return ERR_PTR(-EINVAL);
2554 nh = nexthop_alloc();
2555 if (!nh)
2556 return ERR_PTR(-ENOMEM);
2560 nhg = nexthop_grp_alloc(num_nh);
2561 if (!nhg) {
2562 kfree(nh);
2563 return ERR_PTR(-ENOMEM);
2564 }
2566 /* spare group used for removals */
2567 nhg->spare = nexthop_grp_alloc(num_nh);
2568 if (!nhg->spare) {
2569 kfree(nhg);
2570 kfree(nh);
2571 return ERR_PTR(-ENOMEM);
2572 }
2573 nhg->spare->spare = nhg;
2575 for (i = 0; i < nhg->num_nh; ++i) {
2576 struct nexthop *nhe;
2577 struct nh_info *nhi;
2579 nhe = nexthop_find_by_id(net, entry[i].id);
2580 if (!nexthop_get(nhe)) {
2585 nhi = rtnl_dereference(nhe->nh_info);
2586 if (nhi->family == AF_INET)
2589 nhg->nh_entries[i].stats =
2590 netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2591 if (!nhg->nh_entries[i].stats) {
2596 nhg->nh_entries[i].nh = nhe;
2597 nhg->nh_entries[i].weight = entry[i].weight + 1;
2598 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2599 nhg->nh_entries[i].nh_parent = nh;
2602 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2603 nhg->hash_threshold = 1;
2604 nhg->is_multipath = true;
2605 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2606 struct nh_res_table *res_table;
2608 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2614 rcu_assign_pointer(nhg->spare->res_table, res_table);
2615 rcu_assign_pointer(nhg->res_table, res_table);
2616 nhg->resilient = true;
2617 nhg->is_multipath = true;
2620 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2622 if (nhg->hash_threshold)
2623 nh_hthr_group_rebalance(nhg);
2628 if (cfg->nh_hw_stats)
2629 nhg->hw_stats = true;
2631 rcu_assign_pointer(nh->nh_grp, nhg);
2636 for (i--; i >= 0; --i) {
2637 list_del(&nhg->nh_entries[i].nh_list);
2638 free_percpu(nhg->nh_entries[i].stats);
2639 nexthop_put(nhg->nh_entries[i].nh);
2646 return ERR_PTR(err);
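/* Initialize the embedded fib_nh from the netlink config and verify
 * the gateway/device against the FIB, much as fib_create_info() does
 * for legacy per-route nexthops.
 */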
2649 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2650 struct nh_info *nhi, struct nh_config *cfg,
2651 struct netlink_ext_ack *extack)
2653 struct fib_nh *fib_nh = &nhi->fib_nh;
2654 struct fib_config fib_cfg = {
2655 .fc_oif = cfg->nh_ifindex,
2656 .fc_gw4 = cfg->gw.ipv4,
2657 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2658 .fc_flags = cfg->nh_flags,
2659 .fc_nlinfo = cfg->nlinfo,
2660 .fc_encap = cfg->nh_encap,
2661 .fc_encap_type = cfg->nh_encap_type,
2663 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2666 err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2668 fib_nh_release(net, fib_nh);
2675 /* sets nh_dev if successful */
2676 err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2678 nh->nh_flags = fib_nh->fib_nh_flags;
2679 fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2680 !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2682 fib_nh_release(net, fib_nh);
2688 static int nh_create_ipv6(struct net *net, struct nexthop *nh,
2689 struct nh_info *nhi, struct nh_config *cfg,
2690 struct netlink_ext_ack *extack)
2692 struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2693 struct fib6_config fib6_cfg = {
2694 .fc_table = l3mdev_fib_table(cfg->dev),
2695 .fc_ifindex = cfg->nh_ifindex,
2696 .fc_gateway = cfg->gw.ipv6,
2697 .fc_flags = cfg->nh_flags,
2698 .fc_nlinfo = cfg->nlinfo,
2699 .fc_encap = cfg->nh_encap,
2700 .fc_encap_type = cfg->nh_encap_type,
2701 .fc_is_fdb = cfg->nh_fdb,
2705 if (!ipv6_addr_any(&cfg->gw.ipv6))
2706 fib6_cfg.fc_flags |= RTF_GATEWAY;
2708 /* sets nh_dev if successful */
2709 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2712 /* IPv6 is not enabled, don't call fib6_nh_release */
2713 if (err == -EAFNOSUPPORT)
2715 ipv6_stub->fib6_nh_release(fib6_nh);
2717 nh->nh_flags = fib6_nh->fib_nh_flags;
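/* Create a single (non-group) nexthop. Blackhole nexthops are pinned
 * to the loopback device so they carry a valid device reference while
 * still rejecting traffic.
 */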
2723 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2724 struct netlink_ext_ack *extack)
2726 struct nh_info *nhi;
2730 nh = nexthop_alloc();
2731 if (!nh)
2732 return ERR_PTR(-ENOMEM);
2734 nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2735 if (!nhi) {
2736 kfree(nh);
2737 return ERR_PTR(-ENOMEM);
2738 }
2740 nh->nh_flags = cfg->nh_flags;
2743 nhi->nh_parent = nh;
2744 nhi->family = cfg->nh_family;
2745 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2750 if (cfg->nh_blackhole) {
2751 nhi->reject_nh = 1;
2752 cfg->nh_ifindex = net->loopback_dev->ifindex;
2755 switch (cfg->nh_family) {
2757 err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2760 err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2767 return ERR_PTR(err);
2770 /* add the entry to the device based hash */
2772 nexthop_devhash_add(net, nhi);
2774 rcu_assign_pointer(nh->nh_info, nhi);
2779 /* called with rtnl lock held */
2780 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2781 struct netlink_ext_ack *extack)
2786 if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2787 NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2788 return ERR_PTR(-EINVAL);
2792 cfg->nh_id = nh_find_unused_id(net);
2794 NL_SET_ERR_MSG(extack, "No unused id");
2795 return ERR_PTR(-EINVAL);
2800 nh = nexthop_create_group(net, cfg);
2802 nh = nexthop_create(net, cfg, extack);
2807 refcount_set(&nh->refcnt, 1);
2808 nh->id = cfg->nh_id;
2809 nh->protocol = cfg->nh_protocol;
2812 err = insert_nexthop(net, nh, cfg, extack);
2814 __remove_nexthop(net, nh, NULL);
2822 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2823 unsigned long *timer_p, bool *has_p,
2824 struct netlink_ext_ack *extack)
2826 unsigned long timer;
2830 *timer_p = fallback;
2835 value = nla_get_u32(attr);
2836 timer = clock_t_to_jiffies(value);
2837 if (timer == ~0UL) {
2838 NL_SET_ERR_MSG(extack, "Timer value too large");
2847 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2848 struct netlink_ext_ack *extack)
2850 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2854 err = nla_parse_nested(tb,
2855 ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2856 res, rtm_nh_res_policy_new, extack);
2861 if (tb[NHA_RES_GROUP_BUCKETS]) {
2862 cfg->nh_grp_res_num_buckets =
2863 nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2864 cfg->nh_grp_res_has_num_buckets = true;
2865 if (!cfg->nh_grp_res_num_buckets) {
2866 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
2871 err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
2872 NH_RES_DEFAULT_IDLE_TIMER,
2873 &cfg->nh_grp_res_idle_timer,
2874 &cfg->nh_grp_res_has_idle_timer,
2879 return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
2880 NH_RES_DEFAULT_UNBALANCED_TIMER,
2881 &cfg->nh_grp_res_unbalanced_timer,
2882 &cfg->nh_grp_res_has_unbalanced_timer,
2886 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
2887 struct nlmsghdr *nlh, struct nh_config *cfg,
2888 struct netlink_ext_ack *extack)
2890 struct nhmsg *nhm = nlmsg_data(nlh);
2891 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
2894 err = nlmsg_parse(nlh, sizeof(*nhm), tb,
2895 ARRAY_SIZE(rtm_nh_policy_new) - 1,
2896 rtm_nh_policy_new, extack);
2901 if (nhm->resvd || nhm->nh_scope) {
2902 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
2905 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
2906 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
2910 switch (nhm->nh_family) {
2919 NL_SET_ERR_MSG(extack, "Invalid address family");
2923 memset(cfg, 0, sizeof(*cfg));
2924 cfg->nlflags = nlh->nlmsg_flags;
2925 cfg->nlinfo.portid = NETLINK_CB(skb).portid;
2926 cfg->nlinfo.nlh = nlh;
2927 cfg->nlinfo.nl_net = net;
2929 cfg->nh_family = nhm->nh_family;
2930 cfg->nh_protocol = nhm->nh_protocol;
2931 cfg->nh_flags = nhm->nh_flags;
2934 cfg->nh_id = nla_get_u32(tb[NHA_ID]);
2937 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
2938 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
2939 NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
2942 if (nhm->nh_flags) {
2943 NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
2946 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
2949 if (tb[NHA_GROUP]) {
2950 if (nhm->nh_family != AF_UNSPEC) {
2951 NL_SET_ERR_MSG(extack, "Invalid family for group");
2954 cfg->nh_grp = tb[NHA_GROUP];
2956 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
2957 if (tb[NHA_GROUP_TYPE])
2958 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
2960 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
2961 NL_SET_ERR_MSG(extack, "Invalid group type");
2964 err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
2965 cfg->nh_grp_type, extack);
2969 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
2970 err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
2973 if (tb[NHA_HW_STATS_ENABLE])
2974 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
2976 /* no other attributes should be set */
2980 if (tb[NHA_BLACKHOLE]) {
2981 if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
2982 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
2983 NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
2987 cfg->nh_blackhole = 1;
2992 if (!cfg->nh_fdb && !tb[NHA_OIF]) {
2993 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
2997 if (!cfg->nh_fdb && tb[NHA_OIF]) {
2998 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
2999 if (cfg->nh_ifindex)
3000 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3003 NL_SET_ERR_MSG(extack, "Invalid device index");
3005 } else if (!(cfg->dev->flags & IFF_UP)) {
3006 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3009 } else if (!netif_carrier_ok(cfg->dev)) {
3010 NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3017 if (tb[NHA_GATEWAY]) {
3018 struct nlattr *gwa = tb[NHA_GATEWAY];
3020 switch (cfg->nh_family) {
3022 if (nla_len(gwa) != sizeof(u32)) {
3023 NL_SET_ERR_MSG(extack, "Invalid gateway");
3026 cfg->gw.ipv4 = nla_get_be32(gwa);
3029 if (nla_len(gwa) != sizeof(struct in6_addr)) {
3030 NL_SET_ERR_MSG(extack, "Invalid gateway");
3033 cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3036 NL_SET_ERR_MSG(extack,
3037 "Unknown address family for gateway");
3041 /* device only nexthop (no gateway) */
3042 if (cfg->nh_flags & RTNH_F_ONLINK) {
3043 NL_SET_ERR_MSG(extack,
3044 "ONLINK flag can not be set for nexthop without a gateway");
3049 if (tb[NHA_ENCAP]) {
3050 cfg->nh_encap = tb[NHA_ENCAP];
3052 if (!tb[NHA_ENCAP_TYPE]) {
3053 NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3057 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3058 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3062 } else if (tb[NHA_ENCAP_TYPE]) {
3063 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3067 if (tb[NHA_HW_STATS_ENABLE]) {
3068 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3078 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3079 struct netlink_ext_ack *extack)
3081 struct net *net = sock_net(skb->sk);
3082 struct nh_config cfg;
3086 err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
3088 nh = nexthop_add(net, &cfg, extack);
3096 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3097 struct nlattr **tb, u32 *id, u32 *op_flags,
3098 struct netlink_ext_ack *extack)
3100 struct nhmsg *nhm = nlmsg_data(nlh);
3102 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3103 NL_SET_ERR_MSG(extack, "Invalid values in header");
3108 NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3112 *id = nla_get_u32(tb[NHA_ID]);
3113 if (!(*id)) {
3114 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3115 return -EINVAL;
3116 }
3118 if (tb[NHA_OP_FLAGS])
3119 *op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
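/* RTM_DELNEXTHOP handler, e.g. "ip nexthop del id 1". Removal also
 * drops the nexthop from any groups and flushes routes that were
 * using it.
 */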
3127 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3128 struct netlink_ext_ack *extack)
3130 struct net *net = sock_net(skb->sk);
3131 struct nlattr *tb[NHA_MAX + 1];
3132 struct nl_info nlinfo = {
3135 .portid = NETLINK_CB(skb).portid,
3142 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
3143 rtm_nh_policy_del, extack);
3147 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3151 nh = nexthop_find_by_id(net, id);
3155 remove_nexthop(net, nh, &nlinfo);
3161 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3162 struct netlink_ext_ack *extack)
3164 struct net *net = sock_net(in_skb->sk);
3165 struct nlattr *tb[NHA_MAX + 1];
3166 struct sk_buff *skb = NULL;
3172 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
3173 rtm_nh_policy_get, extack);
3177 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3182 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3187 nh = nexthop_find_by_id(net, id);
3191 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3192 nlh->nlmsg_seq, 0, op_flags);
3194 WARN_ON(err == -EMSGSIZE);
3198 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3206 struct nh_dump_filter {
3212 u32 res_bucket_nh_id;
3216 static bool nh_dump_filtered(struct nexthop *nh,
3217 struct nh_dump_filter *filter, u8 family)
3219 const struct net_device *dev;
3220 const struct nh_info *nhi;
3222 if (filter->group_filter && !nh->is_group)
3225 if (!filter->dev_idx && !filter->master_idx && !family)
3231 nhi = rtnl_dereference(nh->nh_info);
3232 if (family && nhi->family != family)
3235 dev = nhi->fib_nhc.nhc_dev;
3236 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3239 if (filter->master_idx) {
3240 struct net_device *master;
3245 master = netdev_master_upper_dev_get((struct net_device *)dev);
3246 if (!master || master->ifindex != filter->master_idx)
3253 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3254 struct nh_dump_filter *filter,
3255 struct netlink_ext_ack *extack)
3261 idx = nla_get_u32(tb[NHA_OIF]);
3262 if (idx > INT_MAX) {
3263 NL_SET_ERR_MSG(extack, "Invalid device index");
3266 filter->dev_idx = idx;
3268 if (tb[NHA_MASTER]) {
3269 idx = nla_get_u32(tb[NHA_MASTER]);
3270 if (idx > INT_MAX) {
3271 NL_SET_ERR_MSG(extack, "Invalid master device index");
3274 filter->master_idx = idx;
3276 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3277 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3279 nhm = nlmsg_data(nlh);
3280 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3281 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3285 if (tb[NHA_OP_FLAGS])
3286 filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3288 filter->op_flags = 0;
3293 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3294 struct nh_dump_filter *filter,
3295 struct netlink_callback *cb)
3297 struct nlattr *tb[NHA_MAX + 1];
3300 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
3301 rtm_nh_policy_dump, cb->extack);
3305 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3308 struct rtm_dump_nh_ctx {
3312 static struct rtm_dump_nh_ctx *
3313 rtm_dump_nh_ctx(struct netlink_callback *cb)
3315 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3317 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3321 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3322 struct netlink_callback *cb,
3323 struct rb_root *root,
3324 struct rtm_dump_nh_ctx *ctx,
3325 int (*nh_cb)(struct sk_buff *skb,
3326 struct netlink_callback *cb,
3327 struct nexthop *nh, void *data),
3330 struct rb_node *node;
3335 for (node = rb_first(root); node; node = rb_next(node)) {
3338 nh = rb_entry(node, struct nexthop, rb_node);
3343 err = nh_cb(skb, cb, nh, data);
3351 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3352 struct nexthop *nh, void *data)
3354 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3355 struct nh_dump_filter *filter = data;
3357 if (nh_dump_filtered(nh, filter, nhm->nh_family))
3360 return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3361 NETLINK_CB(cb->skb).portid,
3362 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
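/* RTM_GETNEXTHOP dump handler ("ip nexthop show"). The walk resumes
 * from the ID saved in the callback context, so a dump that fills
 * the skb picks up where it stopped on the next call.
 */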
3366 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3368 struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3369 struct net *net = sock_net(skb->sk);
3370 struct rb_root *root = &net->nexthop.rb_root;
3371 struct nh_dump_filter filter = {};
3374 err = nh_valid_dump_req(cb->nlh, &filter, cb);
3378 err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3379 &rtm_dump_nexthop_cb, &filter);
3381 if (likely(skb->len))
3385 cb->seq = net->nexthop.seq;
3386 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3390 static struct nexthop *
3391 nexthop_find_group_resilient(struct net *net, u32 id,
3392 struct netlink_ext_ack *extack)
3394 struct nh_group *nhg;
3397 nh = nexthop_find_by_id(net, id);
3399 return ERR_PTR(-ENOENT);
3401 if (!nh->is_group) {
3402 NL_SET_ERR_MSG(extack, "Not a nexthop group");
3403 return ERR_PTR(-EINVAL);
3406 nhg = rtnl_dereference(nh->nh_grp);
3407 if (!nhg->resilient) {
3408 NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3409 return ERR_PTR(-EINVAL);
3415 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3416 struct netlink_ext_ack *extack)
3421 idx = nla_get_u32(attr);
3422 if (!idx) {
3423 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3424 return -EINVAL;
3425 }
3434 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3435 struct nh_dump_filter *filter,
3436 struct netlink_callback *cb)
3438 struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3439 struct nlattr *tb[NHA_MAX + 1];
3442 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
3443 rtm_nh_policy_dump_bucket, NULL);
3447 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3451 if (tb[NHA_RES_BUCKET]) {
3452 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3454 err = nla_parse_nested(res_tb, max,
3456 rtm_nh_res_bucket_policy_dump,
3461 err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3462 &filter->res_bucket_nh_id,
3468 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3471 struct rtm_dump_res_bucket_ctx {
3472 struct rtm_dump_nh_ctx nh;
3476 static struct rtm_dump_res_bucket_ctx *
3477 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3479 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3481 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3485 struct rtm_dump_nexthop_bucket_data {
3486 struct rtm_dump_res_bucket_ctx *ctx;
3487 struct nh_dump_filter filter;
3490 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3491 struct netlink_callback *cb,
3493 struct rtm_dump_nexthop_bucket_data *dd)
3495 u32 portid = NETLINK_CB(cb->skb).portid;
3496 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3497 struct nh_res_table *res_table;
3498 struct nh_group *nhg;
3502 nhg = rtnl_dereference(nh->nh_grp);
3503 res_table = rtnl_dereference(nhg->res_table);
3504 for (bucket_index = dd->ctx->bucket_index;
3505 bucket_index < res_table->num_nh_buckets;
3507 struct nh_res_bucket *bucket;
3508 struct nh_grp_entry *nhge;
3510 bucket = &res_table->nh_buckets[bucket_index];
3511 nhge = rtnl_dereference(bucket->nh_entry);
3512 if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3515 if (dd->filter.res_bucket_nh_id &&
3516 dd->filter.res_bucket_nh_id != nhge->nh->id)
3519 dd->ctx->bucket_index = bucket_index;
3520 err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3521 RTM_NEWNEXTHOPBUCKET, portid,
3522 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3528 dd->ctx->bucket_index = 0;
3533 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3534 struct netlink_callback *cb,
3535 struct nexthop *nh, void *data)
3537 struct rtm_dump_nexthop_bucket_data *dd = data;
3538 struct nh_group *nhg;
3543 nhg = rtnl_dereference(nh->nh_grp);
3544 if (!nhg->resilient)
3547 return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
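/* RTM_GETNEXTHOPBUCKET dump handler ("ip nexthop bucket show"). With
 * NHA_ID present only that resilient group is walked; otherwise all
 * resilient groups in the namespace are dumped.
 */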
3551 static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3552 struct netlink_callback *cb)
3554 struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3555 struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3556 struct net *net = sock_net(skb->sk);
3560 err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3564 if (dd.filter.nh_id) {
3565 nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3569 err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3571 struct rb_root *root = &net->nexthop.rb_root;
3573 err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3574 &rtm_dump_nexthop_bucket_cb, &dd);
3578 if (likely(skb->len))
3582 cb->seq = net->nexthop.seq;
3583 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3587 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3589 struct netlink_ext_ack *extack)
3591 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3594 err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3595 res, rtm_nh_res_bucket_policy_get, extack);
3599 if (!tb[NHA_RES_BUCKET_INDEX]) {
3600 NL_SET_ERR_MSG(extack, "Bucket index is missing");
3604 *bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3608 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3609 u32 *id, u16 *bucket_index,
3610 struct netlink_ext_ack *extack)
3612 struct nlattr *tb[NHA_MAX + 1];
3616 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
3617 rtm_nh_policy_get_bucket, extack);
3621 err = nh_valid_get_del_req(nlh, tb, id, &op_flags, extack);
3625 if (!tb[NHA_RES_BUCKET]) {
3626 NL_SET_ERR_MSG(extack, "Bucket information is missing");
3630 err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3631 bucket_index, extack);
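/* RTM_GETNEXTHOPBUCKET doit handler, e.g.:
 *
 *   ip nexthop bucket get id 100 index 0
 */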
3639 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3640 struct netlink_ext_ack *extack)
3642 struct net *net = sock_net(in_skb->sk);
3643 struct nh_res_table *res_table;
3644 struct sk_buff *skb = NULL;
3645 struct nh_group *nhg;
3651 err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3655 nh = nexthop_find_group_resilient(net, id, extack);
3659 nhg = rtnl_dereference(nh->nh_grp);
3660 res_table = rtnl_dereference(nhg->res_table);
3661 if (bucket_index >= res_table->num_nh_buckets) {
3662 NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3666 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3670 err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3671 bucket_index, RTM_NEWNEXTHOPBUCKET,
3672 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3675 WARN_ON(err == -EMSGSIZE);
3679 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
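/* Called on NETDEV_CHANGEMTU to refresh MTUs cached in nexthops
 * bound to the device; note only AF_INET entries cache an MTU in
 * fib_nhc below.
 */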
3686 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3688 unsigned int hash = nh_dev_hashfn(dev->ifindex);
3689 struct net *net = dev_net(dev);
3690 struct hlist_head *head = &net->nexthop.devhash[hash];
3691 struct hlist_node *n;
3692 struct nh_info *nhi;
3694 hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3695 if (nhi->fib_nhc.nhc_dev == dev) {
3696 if (nhi->family == AF_INET)
3697 fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3704 static int nh_netdev_event(struct notifier_block *this,
3705 unsigned long event, void *ptr)
3707 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3708 struct netdev_notifier_info_ext *info_ext;
3712 case NETDEV_UNREGISTER:
3713 nexthop_flush_dev(dev, event);
3716 if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3717 nexthop_flush_dev(dev, event);
3719 case NETDEV_CHANGEMTU:
3720 info_ext = ptr;
3721 nexthop_sync_mtu(dev, info_ext->ext.mtu);
3722 rt_cache_flush(dev_net(dev));
3728 static struct notifier_block nh_netdev_notifier = {
3729 .notifier_call = nh_netdev_event,
3732 static int nexthops_dump(struct net *net, struct notifier_block *nb,
3733 enum nexthop_event_type event_type,
3734 struct netlink_ext_ack *extack)
3736 struct rb_root *root = &net->nexthop.rb_root;
3737 struct rb_node *node;
3740 for (node = rb_first(root); node; node = rb_next(node)) {
3743 nh = rb_entry(node, struct nexthop, rb_node);
3744 err = call_nexthop_notifier(nb, net, event_type, nh, extack);
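/* A newly registered listener is first brought up to date with a
 * replay of all existing nexthops as NEXTHOP_EVENT_REPLACE; on
 * unregister the mirror image is replayed as NEXTHOP_EVENT_DEL.
 */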
3752 int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3753 struct netlink_ext_ack *extack)
3758 err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3761 err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3767 EXPORT_SYMBOL(register_nexthop_notifier);
3769 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3773 err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3776 nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3779 EXPORT_SYMBOL(__unregister_nexthop_notifier);
3781 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3786 err = __unregister_nexthop_notifier(net, nb);
3790 EXPORT_SYMBOL(unregister_nexthop_notifier);
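/* Drivers report hardware state back into the nexthop, e.g. a
 * hypothetical offload driver marking a nexthop as programmed:
 *
 *   nexthop_set_hw_flags(net, nh_id, true, false);
 *
 * Both RTNH_F_OFFLOAD and RTNH_F_TRAP are cleared before the
 * requested flags are set, so repeated calls stay consistent.
 */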
3792 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3794 struct nexthop *nexthop;
3798 nexthop = nexthop_find_by_id(net, id);
3802 nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3804 nexthop->nh_flags |= RTNH_F_OFFLOAD;
3806 nexthop->nh_flags |= RTNH_F_TRAP;
3811 EXPORT_SYMBOL(nexthop_set_hw_flags);
3813 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3814 bool offload, bool trap)
3816 struct nh_res_table *res_table;
3817 struct nh_res_bucket *bucket;
3818 struct nexthop *nexthop;
3819 struct nh_group *nhg;
3823 nexthop = nexthop_find_by_id(net, id);
3824 if (!nexthop || !nexthop->is_group)
3827 nhg = rcu_dereference(nexthop->nh_grp);
3828 if (!nhg->resilient)
3831 if (bucket_index >= nhg->res_table->num_nh_buckets)
3834 res_table = rcu_dereference(nhg->res_table);
3835 bucket = &res_table->nh_buckets[bucket_index];
3836 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3838 bucket->nh_flags |= RTNH_F_OFFLOAD;
3840 bucket->nh_flags |= RTNH_F_TRAP;
3845 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
3847 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
3848 unsigned long *activity)
3850 struct nh_res_table *res_table;
3851 struct nexthop *nexthop;
3852 struct nh_group *nhg;
3857 nexthop = nexthop_find_by_id(net, id);
3858 if (!nexthop || !nexthop->is_group)
3861 nhg = rcu_dereference(nexthop->nh_grp);
3862 if (!nhg->resilient)
3865 /* Instead of silently ignoring some buckets, demand that the sizes
3866 * be the same.
3867 */
3868 res_table = rcu_dereference(nhg->res_table);
3869 if (num_buckets != res_table->num_nh_buckets)
3872 for (i = 0; i < num_buckets; i++) {
3873 if (test_bit(i, activity))
3874 nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
3880 EXPORT_SYMBOL(nexthop_res_grp_activity_update);
3882 static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
3883 struct list_head *dev_to_kill)
3888 list_for_each_entry(net, net_list, exit_list)
3889 flush_all_nexthops(net);
3892 static void __net_exit nexthop_net_exit(struct net *net)
3894 kfree(net->nexthop.devhash);
3895 net->nexthop.devhash = NULL;
3898 static int __net_init nexthop_net_init(struct net *net)
3900 size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
3902 net->nexthop.rb_root = RB_ROOT;
3903 net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
3904 if (!net->nexthop.devhash)
3906 BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
3911 static struct pernet_operations nexthop_net_ops = {
3912 .init = nexthop_net_init,
3913 .exit = nexthop_net_exit,
3914 .exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
3917 static int __init nexthop_init(void)
3919 register_pernet_subsys(&nexthop_net_ops);
3921 register_netdevice_notifier(&nh_netdev_notifier);
3923 rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
3924 rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
3925 rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
3926 rtm_dump_nexthop, 0);
3928 rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
3929 rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
3931 rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
3932 rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
3934 rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
3935 rtm_dump_nexthop_bucket, 0);
3939 subsys_initcall(nexthop_init);