Merge pull request #8675 from wesleycoakley/pbr-table-range-core-fix

pbrd: implement sparse table lookup for nhg cache
This commit is contained in:
Mark Stapp 2021-05-25 14:09:41 -04:00 committed by GitHub
commit 1efe743ac4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 127 additions and 32 deletions

View File

@ -39,12 +39,13 @@ DEFINE_MTYPE_STATIC(PBRD, PBR_NHG, "PBR Nexthop Groups");
struct hash *pbr_nhg_hash; struct hash *pbr_nhg_hash;
static struct hash *pbr_nhrc_hash; static struct hash *pbr_nhrc_hash;
static struct hash *pbr_nhg_allocated_id_hash;
static uint32_t pbr_nhg_low_table; static uint32_t pbr_nhg_low_table;
static uint32_t pbr_nhg_high_table; static uint32_t pbr_nhg_high_table;
static uint32_t pbr_next_unallocated_table_id;
static uint32_t pbr_nhg_low_rule; static uint32_t pbr_nhg_low_rule;
static uint32_t pbr_nhg_high_rule; static uint32_t pbr_nhg_high_rule;
static bool nhg_tableid[65535];
static void pbr_nht_install_nexthop_group(struct pbr_nexthop_group_cache *pnhgc, static void pbr_nht_install_nexthop_group(struct pbr_nexthop_group_cache *pnhgc,
struct nexthop_group nhg); struct nexthop_group nhg);
@ -194,7 +195,7 @@ static void *pbr_nhgc_alloc(void *p)
new = XCALLOC(MTYPE_PBR_NHG, sizeof(*new)); new = XCALLOC(MTYPE_PBR_NHG, sizeof(*new));
strlcpy(new->name, pnhgc->name, sizeof(pnhgc->name)); strlcpy(new->name, pnhgc->name, sizeof(pnhgc->name));
new->table_id = pbr_nht_get_next_tableid(false); pbr_nht_reserve_next_table_id(new);
DEBUGD(&pbr_dbg_nht, "%s: NHT: %s assigned Table ID: %u", __func__, DEBUGD(&pbr_dbg_nht, "%s: NHT: %s assigned Table ID: %u", __func__,
new->name, new->table_id); new->name, new->table_id);
@ -237,16 +238,22 @@ void pbr_nhgroup_add_nexthop_cb(const struct nexthop_group_cmd *nhgc,
struct pbr_nexthop_cache pnhc_find = {}; struct pbr_nexthop_cache pnhc_find = {};
struct pbr_nexthop_cache *pnhc; struct pbr_nexthop_cache *pnhc;
if (!pbr_nht_get_next_tableid(true)) {
zlog_warn(
"%s: Exhausted all table identifiers; cannot create nexthop-group cache for nexthop-group '%s'",
__func__, nhgc->name);
return;
}
/* find pnhgc by name */ /* find pnhgc by name */
strlcpy(pnhgc_find.name, nhgc->name, sizeof(pnhgc_find.name)); strlcpy(pnhgc_find.name, nhgc->name, sizeof(pnhgc_find.name));
pnhgc = hash_get(pbr_nhg_hash, &pnhgc_find, pbr_nhgc_alloc); pnhgc = hash_lookup(pbr_nhg_hash, &pnhgc_find);
if (!pnhgc) {
/* Check if configured table range is exhausted */
if (!pbr_nht_has_unallocated_table()) {
zlog_warn(
"%s: Exhausted all table identifiers; cannot create nexthop-group cache for nexthop-group '%s'",
__func__, nhgc->name);
return;
}
/* No nhgc but range not exhausted? Then alloc it */
pnhgc = hash_get(pbr_nhg_hash, &pnhgc_find, pbr_nhgc_alloc);
}
/* create & insert new pnhc into pnhgc->nhh */ /* create & insert new pnhc into pnhgc->nhh */
pnhc_find.nexthop = *nhop; pnhc_find.nexthop = *nhop;
@ -289,6 +296,13 @@ void pbr_nhgroup_del_nexthop_cb(const struct nexthop_group_cmd *nhgc,
strlcpy(pnhgc_find.name, nhgc->name, sizeof(pnhgc_find.name)); strlcpy(pnhgc_find.name, nhgc->name, sizeof(pnhgc_find.name));
pnhgc = hash_lookup(pbr_nhg_hash, &pnhgc_find); pnhgc = hash_lookup(pbr_nhg_hash, &pnhgc_find);
/*
* Ignore deletions of nhg we did not / could not allocate nhgc for
* Occurs when the PBR table range is full but new nhgs keep coming in
*/
if (!pnhgc)
return;
/* delete pnhc from pnhgc->nhh */ /* delete pnhc from pnhgc->nhh */
pnhc_find.nexthop = *nhop; pnhc_find.nexthop = *nhop;
pnhc = hash_release(pnhgc->nhh, &pnhc_find); pnhc = hash_release(pnhgc->nhh, &pnhc_find);
@ -533,7 +547,7 @@ void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms,
pbr_nht_nexthop_make_name(pbrms->parent->name, PBR_NHC_NAMELEN, pbr_nht_nexthop_make_name(pbrms->parent->name, PBR_NHC_NAMELEN,
pbrms->seqno, find.name); pbrms->seqno, find.name);
if (!pbr_nht_get_next_tableid(true)) { if (!pbr_nht_has_unallocated_table()) {
zlog_warn( zlog_warn(
"%s: Exhausted all table identifiers; cannot create nexthop-group cache for nexthop-group '%s'", "%s: Exhausted all table identifiers; cannot create nexthop-group cache for nexthop-group '%s'",
__func__, find.name); __func__, find.name);
@ -610,7 +624,7 @@ struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name)
struct pbr_nexthop_group_cache *pnhgc; struct pbr_nexthop_group_cache *pnhgc;
struct pbr_nexthop_group_cache lookup; struct pbr_nexthop_group_cache lookup;
if (!pbr_nht_get_next_tableid(true)) { if (!pbr_nht_has_unallocated_table()) {
zlog_warn( zlog_warn(
"%s: Exhausted all table identifiers; cannot create nexthop-group cache for nexthop-group '%s'", "%s: Exhausted all table identifiers; cannot create nexthop-group cache for nexthop-group '%s'",
__func__, name); __func__, name);
@ -666,6 +680,18 @@ void pbr_nht_delete_group(const char *name)
strlcpy(pnhgc_find.name, name, sizeof(pnhgc_find.name)); strlcpy(pnhgc_find.name, name, sizeof(pnhgc_find.name));
pnhgc = hash_release(pbr_nhg_hash, &pnhgc_find); pnhgc = hash_release(pbr_nhg_hash, &pnhgc_find);
/*
* Ignore deletions of nh we did not / could not allocate nhgc for
* Occurs when the PBR table range is full but new nhgs keep coming in
*/
if (!pnhgc)
return;
/* Remove and recalculate the next table id */
hash_release(pbr_nhg_allocated_id_hash, pnhgc);
pbr_nht_update_next_unallocated_table_id();
pbr_nhgc_delete(pnhgc); pbr_nhgc_delete(pnhgc);
} }
@ -1146,6 +1172,24 @@ void pbr_nht_nexthop_interface_update(struct interface *ifp)
ifp); ifp);
} }
static bool pbr_nhg_allocated_id_hash_equal(const void *arg1, const void *arg2)
{
	/*
	 * Two cache entries are equal iff they reserve the same table id;
	 * the allocated-id hash is keyed solely on table_id.
	 */
	const struct pbr_nexthop_group_cache *a = arg1;
	const struct pbr_nexthop_group_cache *b = arg2;

	return a->table_id == b->table_id;
}
static uint32_t pbr_nhg_allocated_id_hash_key(const void *arg)
{
	const struct pbr_nexthop_group_cache *pnhgc = arg;

	/* table_id makes elements in this hash unique */
	return pnhgc->table_id;
}
static uint32_t pbr_nhg_hash_key(const void *arg) static uint32_t pbr_nhg_hash_key(const void *arg)
{ {
const struct pbr_nexthop_group_cache *nhgc = arg; const struct pbr_nexthop_group_cache *nhgc = arg;
@ -1163,29 +1207,62 @@ static bool pbr_nhg_hash_equal(const void *arg1, const void *arg2)
return !strcmp(nhgc1->name, nhgc2->name); return !strcmp(nhgc1->name, nhgc2->name);
} }
uint32_t pbr_nht_get_next_tableid(bool peek) uint32_t pbr_nht_find_next_unallocated_table_id(void)
{ {
uint32_t i; struct pbr_nexthop_group_cache iter;
bool found = false;
for (i = pbr_nhg_low_table; i <= pbr_nhg_high_table; i++) { /*
if (!nhg_tableid[i]) { * Find the smallest unallocated table id
found = true; * This can be non-trivial considering nhg removals / shifting upper &
break; * lower bounds, so start at the lowest in the range and continue until
} * an unallocated space is found
} */
for (iter.table_id = pbr_nhg_low_table;
iter.table_id < pbr_nhg_high_table; ++iter.table_id)
if (!hash_lookup(pbr_nhg_allocated_id_hash, &iter))
return iter.table_id;
if (found) { /* Configured range is full, cannot install anywhere */
nhg_tableid[i] = !peek; return 0;
return i; }
} else
bool pbr_nht_has_unallocated_table(void)
{
	/*
	 * The cached next-free id is 0 exactly when the configured
	 * table range is exhausted, so any nonzero value means a
	 * table id is still available.
	 */
	return pbr_next_unallocated_table_id != 0;
}
/*
 * Recompute and cache the lowest unallocated table id in the configured
 * range; the cache holds 0 when the range is exhausted. Called after any
 * event that can change availability (reserve, delete, range change).
 */
void pbr_nht_update_next_unallocated_table_id(void)
{
pbr_next_unallocated_table_id =
pbr_nht_find_next_unallocated_table_id();
}
uint32_t pbr_nht_reserve_next_table_id(struct pbr_nexthop_group_cache *nhgc)
{
/* Nothing to reserve if all tables in range already used */
if (!pbr_next_unallocated_table_id)
return 0; return 0;
/* Reserve this table id */
nhgc->table_id = pbr_next_unallocated_table_id;
/* Mark table id as allocated in id-indexed hash */
hash_get(pbr_nhg_allocated_id_hash, nhgc, hash_alloc_intern);
/* Pre-compute the next unallocated table id */
pbr_nht_update_next_unallocated_table_id();
/* Present caller with reserved table id */
return nhgc->table_id;
} }
void pbr_nht_set_tableid_range(uint32_t low, uint32_t high) void pbr_nht_set_tableid_range(uint32_t low, uint32_t high)
{ {
pbr_nhg_low_table = low; pbr_nhg_low_table = low;
pbr_nhg_high_table = high; pbr_nhg_high_table = high;
/* Re-compute next unallocated id within new range */
pbr_nht_update_next_unallocated_table_id();
} }
void pbr_nht_write_table_range(struct vty *vty) void pbr_nht_write_table_range(struct vty *vty)
@ -1352,10 +1429,15 @@ void pbr_nht_init(void)
pbr_nhrc_hash = pbr_nhrc_hash =
hash_create_size(16, (unsigned int (*)(const void *))nexthop_hash, hash_create_size(16, (unsigned int (*)(const void *))nexthop_hash,
pbr_nhrc_hash_equal, "PBR NH Hash"); pbr_nhrc_hash_equal, "PBR NH Hash");
pbr_nhg_allocated_id_hash = hash_create_size(
16, pbr_nhg_allocated_id_hash_key,
pbr_nhg_allocated_id_hash_equal, "PBR Allocated Table Hash");
pbr_nhg_low_table = PBR_NHT_DEFAULT_LOW_TABLEID; pbr_nhg_low_table = PBR_NHT_DEFAULT_LOW_TABLEID;
pbr_nhg_high_table = PBR_NHT_DEFAULT_HIGH_TABLEID; pbr_nhg_high_table = PBR_NHT_DEFAULT_HIGH_TABLEID;
pbr_nhg_low_rule = PBR_NHT_DEFAULT_LOW_RULE; pbr_nhg_low_rule = PBR_NHT_DEFAULT_LOW_RULE;
pbr_nhg_high_rule = PBR_NHT_DEFAULT_HIGH_RULE; pbr_nhg_high_rule = PBR_NHT_DEFAULT_HIGH_RULE;
memset(&nhg_tableid, 0, 65535 * sizeof(uint8_t));
/* First unallocated table is lowest in range on init */
pbr_next_unallocated_table_id = PBR_NHT_DEFAULT_LOW_TABLEID;
} }

View File

@ -64,13 +64,26 @@ extern void pbr_nht_write_table_range(struct vty *vty);
extern void pbr_nht_set_tableid_range(uint32_t low, uint32_t high); extern void pbr_nht_set_tableid_range(uint32_t low, uint32_t high);
/* /*
* Get the next tableid to use for installation. * Find and reserve the next available table for installation;
* * Sequential calls to this function will reserve sequential table numbers
* peek * until the configured range is exhausted; calls made after exhaustion always
* If set to true, retrieves the next ID without marking it used. The next * return 0
* call will return the same ID.
*/ */
extern uint32_t pbr_nht_get_next_tableid(bool peek); extern uint32_t
pbr_nht_reserve_next_table_id(struct pbr_nexthop_group_cache *nhgc);
/*
* Get the next tableid to use for installation to kernel
*/
extern uint32_t pbr_nht_find_next_unallocated_table_id(void);
/*
* Calculate where the next table representing a nhg will go in kernel
*/
extern void pbr_nht_update_next_unallocated_table_id(void);
/*
* Indicate if there are free spots to install a table to kernel within the
* configured PBR table range
*/
extern bool pbr_nht_has_unallocated_table(void);
/* /*
* Get the next rule number to use for installation * Get the next rule number to use for installation
*/ */