zebra: handle backup nexthops in nhe/nhgs

Include backup nexthops in nhe processing; connect incoming
zapi route data with updated rib/nhg apis; add more debugs in
nhg processing.

Signed-off-by: Mark Stapp <mjs@voltanet.io>
Mark Stapp, 2020-03-10 10:50:40 -04:00
commit 377e29f7e7 (parent 6d81b590a9)
4 changed files with 459 additions and 129 deletions

zebra/zapi_msg.c

@@ -1783,13 +1783,24 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
return;
}
/* Include backup info with the route */
memset(&nhe, 0, sizeof(nhe));
/* Include backup info with the route. We use a temporary nhe here;
* if this is a new/unknown nhe, a new copy will be allocated
* and stored.
*/
zebra_nhe_init(&nhe, afi, ng->nexthop);
nhe.nhg.nexthop = ng->nexthop;
nhe.backup_info = bnhg;
ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p,
re, &nhe);
/* At this point, these allocations are not needed: 're' has been
* retained or freed, and if 're' still exists, it is using
* a reference to a shared group object.
*/
nexthop_group_delete(&ng);
if (bnhg)
zebra_nhg_backup_free(&bnhg);
/* Stats */
switch (api.prefix.family) {
case AF_INET:
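Reassembled for readability, the new code path in zread_route_add reads roughly as follows (condensed from the hunk above; only the touched lines are shown):

	struct nhg_hash_entry nhe;

	/* Temporary, stack-allocated nhe: it carries the nexthops and any
	 * backup info into the rib. If the group turns out to be new, the
	 * hash code allocates and stores its own copy; 'nhe' itself is
	 * never retained.
	 */
	zebra_nhe_init(&nhe, afi, ng->nexthop);
	nhe.nhg.nexthop = ng->nexthop;
	nhe.backup_info = bnhg;

	ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p,
				    re, &nhe);

	/* Safe to free the local objects now: 're' was retained or freed,
	 * and a surviving route references a shared group object.
	 */
	nexthop_group_delete(&ng);
	if (bnhg)
		zebra_nhg_backup_free(&bnhg);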

zebra/zebra_nhg.c

@@ -298,7 +298,7 @@ static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp)
static void
zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
struct nhg_connected_tree_head nhg_depends)
struct nhg_connected_tree_head *nhg_depends)
{
struct nhg_connected *rb_node_dep = NULL;
@@ -307,31 +307,58 @@ zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
* for now. Otherwise, there might be a time trade-off for repeated
* allocs/frees at startup.
*/
nhe->nhg_depends = nhg_depends;
nhe->nhg_depends = *nhg_depends;
/* Attach backpointer to anything that it depends on */
zebra_nhg_dependents_init(nhe);
if (!zebra_nhg_depends_is_empty(nhe)) {
frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nhe %p (%u), dep %p (%u)",
__func__, nhe, nhe->id,
rb_node_dep->nhe,
rb_node_dep->nhe->id);
zebra_nhg_dependents_add(rb_node_dep->nhe, nhe);
}
}
}
/* Add the ifp now if its not a group or recursive and has ifindex */
if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop
&& nhe->nhg.nexthop->ifindex) {
struct interface *ifp = NULL;
/* Init an nhe, for use in a hash lookup for example */
void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
const struct nexthop *nh)
{
memset(nhe, 0, sizeof(struct nhg_hash_entry));
nhe->vrf_id = VRF_DEFAULT;
nhe->type = ZEBRA_ROUTE_NHG;
nhe->afi = AFI_UNSPEC;
ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
nhe->nhg.nexthop->vrf_id);
if (ifp)
zebra_nhg_set_if(nhe, ifp);
else
flog_err(
EC_ZEBRA_IF_LOOKUP_FAILED,
"Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
nhe->nhg.nexthop->ifindex,
nhe->nhg.nexthop->vrf_id, nhe->id);
/* There are some special rules that apply to groups representing
* a single nexthop.
*/
if (nh && (nh->next == NULL)) {
switch (nh->type) {
case (NEXTHOP_TYPE_IFINDEX):
case (NEXTHOP_TYPE_BLACKHOLE):
/*
* This switch case handles setting the afi different
* for ipv4/v6 routes. Ifindex/blackhole nexthop
* objects cannot be ambiguous, they must be Address
* Family specific. If we get here, we will either use
* the AF of the route, or the one we were passed
* from the kernel.
*/
nhe->afi = afi;
break;
case (NEXTHOP_TYPE_IPV4_IFINDEX):
case (NEXTHOP_TYPE_IPV4):
nhe->afi = AFI_IP;
break;
case (NEXTHOP_TYPE_IPV6_IFINDEX):
case (NEXTHOP_TYPE_IPV6):
nhe->afi = AFI_IP6;
break;
}
}
}
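/* Editorial illustration, not part of this commit: given the rules
 * above, the same interface nexthop produces distinct per-AFI
 * singleton entries, since an ifindex (or blackhole) nexthop is
 * ambiguous without the route's address family. 'nh' is assumed to
 * be a single NEXTHOP_TYPE_IFINDEX nexthop.
 */
struct nhg_hash_entry lookup_v4, lookup_v6;

zebra_nhe_init(&lookup_v4, AFI_IP, nh);  /* lookup_v4.afi == AFI_IP  */
zebra_nhe_init(&lookup_v6, AFI_IP6, nh); /* lookup_v6.afi == AFI_IP6 */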
@@ -379,7 +406,25 @@ static void *zebra_nhg_hash_alloc(void *arg)
/* Mark duplicate nexthops in a group at creation time. */
nexthop_group_mark_duplicates(&(nhe->nhg));
zebra_nhg_connect_depends(nhe, copy->nhg_depends);
zebra_nhg_connect_depends(nhe, &(copy->nhg_depends));
/* Add the ifp now if it's not a group or recursive and has ifindex */
if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop
&& nhe->nhg.nexthop->ifindex) {
struct interface *ifp = NULL;
ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
nhe->nhg.nexthop->vrf_id);
if (ifp)
zebra_nhg_set_if(nhe, ifp);
else
flog_err(
EC_ZEBRA_IF_LOOKUP_FAILED,
"Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
nhe->nhg.nexthop->ifindex,
nhe->nhg.nexthop->vrf_id, nhe->id);
}
zebra_nhg_insert_id(nhe);
return nhe;
@@ -567,34 +612,181 @@ static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends,
resolved_ng.nexthop = nh;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: head %p, nh %pNHv",
__func__, nhg_depends, nh);
depend = zebra_nhg_rib_find(0, &resolved_ng, afi);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv => %p (%u)",
__func__, nh, depend,
depend ? depend->id : 0);
if (depend)
depends_add(nhg_depends, depend);
}
/*
* Lookup an nhe in the global hash, using data from another nhe. If 'lookup'
* has an id value, that's used. Create a new global/shared nhe if not found.
*/
static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */
struct nhg_hash_entry *lookup,
struct nhg_connected_tree_head *nhg_depends,
afi_t afi)
{
bool created = false;
bool recursive = false;
struct nhg_hash_entry *newnhe;
struct nexthop *nh = NULL;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: id %u, lookup %p, vrf %d, type %d, depends %p",
__func__, lookup->id, lookup,
lookup->vrf_id, lookup->type,
nhg_depends);
if (lookup->id)
(*nhe) = zebra_nhg_lookup_id(lookup->id);
else
(*nhe) = hash_lookup(zrouter.nhgs, lookup);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: lookup => %p (%u)",
__func__, (*nhe),
(*nhe) ? (*nhe)->id : 0);
/* If we found an existing object, we're done */
if (*nhe)
goto done;
/* We're going to create/insert a new nhe:
* assign the next global id value if necessary.
*/
if (lookup->id == 0)
lookup->id = ++id_counter;
newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc);
created = true;
/* Mail back the new object */
*nhe = newnhe;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: => created %p (%u)", __func__, newnhe,
newnhe->id);
/* Only hash/lookup the depends if the first lookup
* fails to find something. This should hopefully save a
* lot of cycles for larger ecmp sizes.
*/
if (nhg_depends) {
/* If you don't want to hash on each nexthop in the
* nexthop group struct you can pass the depends
* directly. Kernel-side we do this since it just looks
* them up via IDs.
*/
zebra_nhg_connect_depends(newnhe, nhg_depends);
goto done;
}
/* Prepare dependency relationships if this is not a
* singleton nexthop. There are two cases: a single
* recursive nexthop, where we need a relationship to the
* resolving nexthop; or a group of nexthops, where we need
* relationships with the corresponding singletons.
*/
zebra_nhg_depends_init(lookup);
nh = newnhe->nhg.nexthop;
if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE))
SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID);
if (nh->next == NULL) {
if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
/* Single recursive nexthop */
handle_recursive_depend(&newnhe->nhg_depends,
nh->resolved, afi);
recursive = true;
}
} else {
/* List of nexthops */
for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) {
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: depends NH %pNHv %s",
__func__, nh,
CHECK_FLAG(nh->flags,
NEXTHOP_FLAG_RECURSIVE) ?
"(R)" : "");
depends_find_add(&newnhe->nhg_depends, nh, afi);
}
}
/* If there are backup nexthops, add them to the
* depends tree also. The rules here are a little different.
*/
if (zebra_nhg_get_backup_nhg(newnhe) == NULL ||
zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL)
goto backups_done;
nh = zebra_nhg_get_backup_nhg(newnhe)->nexthop;
/* Singleton recursive NH */
if (nh->next == NULL &&
CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: backup depend NH %pNHv (R)",
__func__, nh);
/* Single recursive nexthop */
handle_recursive_depend(&newnhe->nhg_depends,
nh->resolved, afi);
recursive = true;
} else {
/* One or more backup NHs */
for (; nh; nh = nh->next) {
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: backup depend NH %pNHv %s",
__func__, nh,
CHECK_FLAG(nh->flags,
NEXTHOP_FLAG_RECURSIVE) ?
"(R)" : "");
depends_find_add(&newnhe->nhg_depends,
nh, afi);
}
}
backups_done:
if (recursive)
SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE);
done:
return created;
}
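/* Editorial sketch, not part of this commit: the lookup-or-create
 * contract from a hypothetical caller's perspective. 'lookup' is a
 * temporary nhe prepared with zebra_nhe_init(); it is never stored
 * itself.
 */
struct nhg_hash_entry *nhe = NULL;

if (zebra_nhe_find(&nhe, &lookup, NULL, afi)) {
	/* Created: 'nhe' is a newly allocated copy, inserted into
	 * zrouter.nhgs under a fresh id.
	 */
} else {
	/* Found: 'nhe' points at the existing shared entry. */
}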
/*
* Lookup or create an nhe, based on an nhg or an nhe id.
*/
static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
struct nexthop_group *nhg,
struct nhg_connected_tree_head *nhg_depends,
vrf_id_t vrf_id, afi_t afi, int type)
{
struct nhg_hash_entry lookup = {};
uint32_t old_id_counter = id_counter;
bool created = false;
bool recursive = false;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p",
__func__, id, nhg, vrf_id, type,
nhg_depends);
/*
* If it has an id at this point, we must have gotten it from the kernel
*/
lookup.id = id ? id : ++id_counter;
/* Use a temporary nhe and call into the superset/common code */
lookup.id = id;
lookup.type = type ? type : ZEBRA_ROUTE_NHG;
lookup.nhg = *nhg;
@@ -627,53 +819,8 @@ static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
}
}
if (id)
(*nhe) = zebra_nhg_lookup_id(id);
else
(*nhe) = hash_lookup(zrouter.nhgs, &lookup);
created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi);
/* If it found an nhe in our tables, this new ID is unused */
if (*nhe)
id_counter = old_id_counter;
if (!(*nhe)) {
/* Only hash/lookup the depends if the first lookup
* fails to find something. This should hopefully save a
* lot of cycles for larger ecmp sizes.
*/
if (nhg_depends)
/* If you don't want to hash on each nexthop in the
* nexthop group struct you can pass the depends
* directly. Kernel-side we do this since it just looks
* them up via IDs.
*/
lookup.nhg_depends = *nhg_depends;
else {
if (nhg->nexthop->next) {
zebra_nhg_depends_init(&lookup);
/* If it's a group, create a dependency tree */
struct nexthop *nh = NULL;
for (nh = nhg->nexthop; nh; nh = nh->next)
depends_find_add(&lookup.nhg_depends,
nh, afi);
} else if (CHECK_FLAG(nhg->nexthop->flags,
NEXTHOP_FLAG_RECURSIVE)) {
zebra_nhg_depends_init(&lookup);
handle_recursive_depend(&lookup.nhg_depends,
nhg->nexthop->resolved,
afi);
recursive = true;
}
}
(*nhe) = hash_get(zrouter.nhgs, &lookup, zebra_nhg_hash_alloc);
created = true;
if (recursive)
SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE);
}
return created;
}
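/* Editorial sketch, not part of this commit; the id value and caller
 * variables are hypothetical. A kernel-learned group arrives with an
 * explicit id (and usually pre-built depends, so the per-nexthop
 * depends construction in zebra_nhe_find() is skipped); internal
 * callers pass id 0, and an id is assigned on insert.
 */
zebra_nhg_find(&nhe, 30001, &nhg, &nhg_depends, vrf_id, afi,
	       ZEBRA_ROUTE_KERNEL);
zebra_nhg_find(&nhe, 0, &nhg, NULL, vrf_id, afi, ZEBRA_ROUTE_NHG);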
@@ -689,6 +836,10 @@ zebra_nhg_find_nexthop(uint32_t id, struct nexthop *nh, afi_t afi, int type)
zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv => %p (%u)",
__func__, nh, nhe, nhe ? nhe->id : 0);
return nhe;
}
@@ -867,6 +1018,9 @@ done:
static void zebra_nhg_release(struct nhg_hash_entry *nhe)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nhe %p (%u)", __func__, nhe, nhe->id);
/* Remove it from any lists it may be on */
zebra_nhg_depends_release(nhe);
zebra_nhg_dependents_release(nhe);
@@ -932,6 +1086,10 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
lookup = zebra_nhg_lookup_id(id);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: id %u, count %d, lookup => %p",
__func__, id, count, lookup);
if (lookup) {
/* This is already present in our table, hence an update
* that we did not initiate.
@@ -979,6 +1137,11 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
*/
kernel_nhe = zebra_nhg_copy(nhe, id);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: copying kernel nhe (%u), dup of %u",
__func__, id, nhe->id);
zebra_nhg_insert_id(kernel_nhe);
zebra_nhg_set_unhashable(kernel_nhe);
} else if (zebra_nhg_contains_unhashable(nhe)) {
@@ -986,10 +1149,18 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
* depend, so let's mark this group as unhashable as well
* and release it from the non-ID hash.
*/
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nhe %p (%u) unhashable",
__func__, nhe, nhe->id);
hash_release(zrouter.nhgs, nhe);
zebra_nhg_set_unhashable(nhe);
} else {
/* It actually created a new nhe */
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nhe %p (%u) is new",
__func__, nhe, nhe->id);
SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
}
@@ -1098,6 +1269,10 @@ int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
{
struct nhg_ctx *ctx = NULL;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv, id %u, count %d",
__func__, nh, id, (int)count);
if (id > id_counter)
/* Increase our counter so we don't try to create
* an ID that already exists
@@ -1171,12 +1346,17 @@ static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
/* The copy may have allocated labels; free them if necessary. */
nexthop_del_labels(&lookup);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv => %p (%u)",
__func__, nh, nhe, nhe ? nhe->id : 0);
return nhe;
}
static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi)
{
struct nhg_hash_entry *nhe = NULL;
char rbuf[10];
if (!nh)
goto done;
@@ -1184,10 +1364,18 @@ static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi)
/* We are separating these functions out to increase handling speed
* in the non-recursive case (by avoiding allocs/frees)
*/
if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
nhe = depends_find_recursive(nh, afi);
else
strlcpy(rbuf, "(R)", sizeof(rbuf));
} else {
nhe = depends_find_singleton(nh, afi);
rbuf[0] = '\0';
}
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv %s => %p (%u)",
__func__, nh, rbuf,
nhe, nhe ? nhe->id : 0);
done:
return nhe;
@@ -1196,6 +1384,10 @@ done:
static void depends_add(struct nhg_connected_tree_head *head,
struct nhg_hash_entry *depend)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: head %p nh %pNHv",
__func__, head, depend->nhg.nexthop);
/* If NULL is returned, it was successfully added and
* needs to have its refcnt incremented.
*
@@ -1243,7 +1435,7 @@ static void depends_decrement_free(struct nhg_connected_tree_head *head)
nhg_connected_tree_free(head);
}
/* Rib-side, you get a nexthop group struct */
/* Find an nhe based on a list of nexthops */
struct nhg_hash_entry *
zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi)
{
@@ -1259,6 +1451,36 @@ zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi)
zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, 0);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: => nhe %p (%u)",
__func__, nhe, nhe ? nhe->id : 0);
return nhe;
}
/* Find an nhe based on a route's nhe */
struct nhg_hash_entry *
zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi)
{
struct nhg_hash_entry *nhe = NULL;
if (!(rt_nhe && rt_nhe->nhg.nexthop)) {
flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
"No nexthop passed to %s", __func__);
return NULL;
}
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: rt_nhe %p (%u)",
__func__, rt_nhe,
rt_nhe ? rt_nhe->id : 0);
zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: => nhe %p (%u)",
__func__, nhe, nhe ? nhe->id : 0);
return nhe;
}
@@ -1293,19 +1515,6 @@ void zebra_nhg_backup_free(struct nhg_backup_info **p)
}
}
/* Accessor for backup nexthop info */
struct nhg_hash_entry *zebra_nhg_get_backup_nhe(struct nhg_hash_entry *nhe)
{
struct nhg_hash_entry *p = NULL;
if (nhe) {
if (nhe->backup_info)
p = nhe->backup_info->nhe;
}
return p;
}
/* Accessor for backup nexthop group */
struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe)
{
@@ -1351,6 +1560,21 @@ static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
void zebra_nhg_free(struct nhg_hash_entry *nhe)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
/* Group or singleton? */
if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
zlog_debug("%s: nhe %p (%u), refcnt %d",
__func__, nhe,
(nhe ? nhe->id : 0),
(nhe ? nhe->refcnt : 0));
else
zlog_debug("%s: nhe %p (%u), refcnt %d, NH %pNHv",
__func__, nhe,
(nhe ? nhe->id : 0),
(nhe ? nhe->refcnt : 0),
nhe->nhg.nexthop);
}
if (nhe->refcnt)
zlog_debug("nhe_id=%u hash refcnt=%d", nhe->id, nhe->refcnt);
@@ -1366,6 +1590,11 @@ void zebra_nhg_hash_free(void *p)
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nhe %p (%u) %d => %d",
__func__, nhe, nhe->id, nhe->refcnt,
nhe->refcnt - 1);
nhe->refcnt--;
if (!zebra_nhg_depends_is_empty(nhe))
@@ -1377,6 +1606,11 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nhe %p (%u) %d => %d",
__func__, nhe, nhe->id, nhe->refcnt,
nhe->refcnt + 1);
nhe->refcnt++;
if (!zebra_nhg_depends_is_empty(nhe))
@@ -1526,6 +1760,10 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
nexthop->resolved = NULL;
re->nexthop_mtu = 0;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: re %p, nexthop %pNHv",
__func__, re, nexthop);
/*
* If the kernel has sent us a NEW route, then
* by golly gee whiz it's a good route.
@@ -1674,6 +1912,12 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
|| nexthop->type == NEXTHOP_TYPE_IPV6)
nexthop->ifindex = newhop->ifindex;
}
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: CONNECT match %p (%u), newhop %pNHv",
__func__, match,
match->nhe->id, newhop);
return 1;
} else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
resolved = 0;
@@ -1684,6 +1928,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
if (!nexthop_valid_resolve(nexthop, newhop))
continue;
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: RECURSIVE match %p (%u), newhop %pNHv",
__func__, match,
match->nhe->id, newhop);
SET_FLAG(nexthop->flags,
NEXTHOP_FLAG_RECURSIVE);
nexthop_set_resolved(afi, newhop, nexthop);
@@ -1706,6 +1955,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
if (!nexthop_valid_resolve(nexthop, newhop))
continue;
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("%s: STATIC match %p (%u), newhop %pNHv",
__func__, match,
match->nhe->id, newhop);
SET_FLAG(nexthop->flags,
NEXTHOP_FLAG_RECURSIVE);
nexthop_set_resolved(afi, newhop, nexthop);
@@ -1824,11 +2078,11 @@ static unsigned nexthop_active_check(struct route_node *rn,
default:
break;
}
if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
" %s: Unable to find a active nexthop",
__func__);
zlog_debug(" %s: Unable to find active nexthop",
__func__);
return 0;
}
@@ -1909,45 +2163,37 @@ done:
}
/*
* Iterate over all nexthops of the given RIB entry and refresh their
* ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
* the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
*
* Return value is the new number of active nexthops.
* Process a list of nexthops, given the head of the list, determining
* whether each one is ACTIVE/installable at this time.
*/
int nexthop_active_update(struct route_node *rn, struct route_entry *re)
static uint32_t nexthop_list_active_update(struct route_node *rn,
struct route_entry *re,
struct nexthop *nexthop)
{
struct nexthop_group new_grp = {};
struct nexthop *nexthop;
union g_addr prev_src;
unsigned int prev_active, new_active;
ifindex_t prev_index;
uint8_t curr_active = 0;
uint32_t counter = 0;
afi_t rt_afi = family2afi(rn->p.family);
UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
/* Copy over the nexthops in current state */
nexthop_group_copy(&new_grp, &(re->nhe->nhg));
for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next) {
/* Process nexthops one-by-one */
for ( ; nexthop; nexthop = nexthop->next) {
/* No protocol daemon provides src and so we're skipping
* tracking it */
* tracking it
*/
prev_src = nexthop->rmap_src;
prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
prev_index = nexthop->ifindex;
/*
* We need to respect the multipath_num here
* as what we should be able to install from
* a multipath perpsective should not be a data plane
* a multipath perspective should not be a data plane
* decision point.
*/
new_active =
nexthop_active_check(rn, re, nexthop);
if (new_active && curr_active >= zrouter.multipath_num) {
if (new_active && counter >= zrouter.multipath_num) {
struct nexthop *nh;
/* Set it and its resolved nexthop as inactive. */
@@ -1958,7 +2204,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
}
if (new_active)
curr_active++;
counter++;
/* Don't allow src setting on IPv6 addr for now */
if (prev_active != new_active || prev_index != nexthop->ifindex
@@ -1974,14 +2220,79 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
}
return counter;
}
/*
* Iterate over all nexthops of the given RIB entry and refresh their
* ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
* the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
*
* Return value is the new number of active nexthops.
*/
int nexthop_active_update(struct route_node *rn, struct route_entry *re)
{
struct nhg_hash_entry *curr_nhe;
uint32_t curr_active = 0, backup_active = 0;
afi_t rt_afi = family2afi(rn->p.family);
UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
/* Make a local copy of the existing nhe, so we don't work on/modify
* the shared nhe.
*/
curr_nhe = zebra_nhg_copy(re->nhe, re->nhe->id);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: re %p nhe %p (%u), curr_nhe %p",
__func__, re, re->nhe, re->nhe->id,
curr_nhe);
/* Clear the existing id, if any: this will avoid any confusion
* if the id exists, and will also force the creation
* of a new nhe reflecting the changes we may make in this local copy.
*/
curr_nhe->id = 0;
/* Process nexthops */
curr_active = nexthop_list_active_update(rn, re, curr_nhe->nhg.nexthop);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: re %p curr_active %u", __func__, re,
curr_active);
/* If there are no backup nexthops, we are done */
if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL)
goto backups_done;
backup_active = nexthop_list_active_update(
rn, re, zebra_nhg_get_backup_nhg(curr_nhe)->nexthop);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: re %p backup_active %u", __func__, re,
backup_active);
backups_done:
/*
* Ref or create an nhe that matches the current state of the
* nexthop(s).
*/
if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
struct nhg_hash_entry *new_nhe = NULL;
new_nhe = zebra_nhg_rib_find(0, &new_grp, rt_afi);
new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: re %p CHANGED: nhe %p (%u) => new_nhe %p (%u)",
__func__, re, re->nhe,
re->nhe->id, new_nhe, new_nhe->id);
route_entry_update_nhe(re, new_nhe);
}
/* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID
* flag where appropriate.
*/
@@ -1989,11 +2300,11 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
zebra_nhg_set_valid_if_active(re->nhe);
/*
* Do not need these nexthops anymore since they
* were either copied over into an nhe or not
* Do not need the old / copied nhe anymore since it
* was either copied over into a new nhe or not
* used at all.
*/
nexthops_free(new_grp.nexthop);
zebra_nhg_free(curr_nhe);
return curr_active;
}
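/* Editorial note: the refresh above is copy-on-write. re->nhe is a
 * shared, refcounted object, so the ACTIVE-flag walk runs on a private
 * copy. The object flow, in outline:
 *
 *   re->nhe (shared) --zebra_nhg_copy()--> curr_nhe (private, id 0)
 *   nexthop_list_active_update() refreshes ACTIVE on curr_nhe's
 *       nexthops, then on its backup nexthops
 *   if ROUTE_ENTRY_CHANGED:
 *       new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi);
 *       route_entry_update_nhe(re, new_nhe);
 *   zebra_nhg_free(curr_nhe);
 */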
@@ -2177,7 +2488,7 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
id = dplane_ctx_get_nhe_id(ctx);
if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug(
"Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s",
ctx, dplane_op2str(op), id, dplane_res2str(status));

zebra/zebra_nhg.h

@@ -187,11 +187,17 @@ void zebra_nhg_free(struct nhg_hash_entry *nhe);
/* In order to clear a generic hash, we need a generic api, sigh. */
void zebra_nhg_hash_free(void *p);
/* Init an nhe, for use in a hash lookup for example. There's some fuzziness
* if the nhe represents only a single nexthop, so we try to capture that
* variant also.
*/
void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
const struct nexthop *nh);
/* Allocate, free backup nexthop info objects */
struct nhg_backup_info *zebra_nhg_backup_alloc(void);
void zebra_nhg_backup_free(struct nhg_backup_info **p);
struct nhg_hash_entry *zebra_nhg_get_backup_nhe(struct nhg_hash_entry *nhe);
struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe);
extern struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe);
@@ -228,10 +234,14 @@ extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh,
/* Del via kernel */
extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id);
/* Find via route creation */
/* Find an nhe based on a nexthop_group */
extern struct nhg_hash_entry *
zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi);
/* Find an nhe based on a route's nhe, used during route creation */
struct nhg_hash_entry *
zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi);
/* Reference counter functions */
extern void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe);
extern void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe);
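For reference, a hedged sketch of how the two find entry points above are meant to be used; 'nh' and 'bnhg' are assumed caller-side objects:

	struct nexthop_group ng = {};
	struct nhg_hash_entry tmp_nhe, *nhe;

	/* Existing path: find or create a shared entry from a bare
	 * nexthop list.
	 */
	ng.nexthop = nh;
	nhe = zebra_nhg_rib_find(0, &ng, AFI_IP);

	/* New path: start from a route's temporary nhe, which can also
	 * carry backup-nexthop info.
	 */
	zebra_nhe_init(&tmp_nhe, AFI_IP, nh);
	tmp_nhe.nhg.nexthop = nh;
	tmp_nhe.backup_info = bnhg;
	nhe = zebra_nhg_rib_find_nhe(&tmp_nhe, AFI_IP);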

zebra/zebra_rib.c

@@ -213,7 +213,7 @@ static void route_entry_attach_ref(struct route_entry *re,
int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new)
{
struct nhg_hash_entry *old = NULL;
struct nhg_hash_entry *old;
int ret = 0;
if (new == NULL) {
@@ -223,7 +223,7 @@ int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new)
goto done;
}
if (re->nhe_id != new->id) {
if ((re->nhe_id != 0) && (re->nhe_id != new->id)) {
old = re->nhe;
route_entry_attach_ref(re, new);
@@ -2619,16 +2619,12 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p,
struct route_node *rn;
struct route_entry *same = NULL;
int ret = 0;
struct nexthop_group *ng;
if (!re || !re_nhe)
return -1;
assert(!src_p || !src_p->prefixlen || afi == AFI_IP6);
/* TODO */
ng = &(re_nhe->nhg);
/* Lookup table. */
table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id,
re->table);
@@ -2647,7 +2643,8 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p,
return -1;
}
} else {
nhe = zebra_nhg_rib_find(0, ng, afi);
/* Lookup nhe from route information */
nhe = zebra_nhg_rib_find_nhe(re_nhe, afi);
if (!nhe) {
char buf[PREFIX_STRLEN] = "";
char buf2[PREFIX_STRLEN] = "";
@@ -2752,7 +2749,7 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
struct nexthop_group *ng)
{
int ret;
struct nhg_hash_entry nhe = {};
struct nhg_hash_entry nhe;
if (!re)
return -1;
@@ -2764,6 +2761,7 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
/*
* Use a temporary nhe to convey info to the common/main api.
*/
zebra_nhe_init(&nhe, afi, (ng ? ng->nexthop : NULL));
if (ng)
nhe.nhg.nexthop = ng->nexthop;
else if (re->nhe_id > 0)