pimd: packet processing optimization on RP change

Problem Statement:
==================
On RP change, PIM processes all the upstreams in a loop and, for the
selected upstreams, has to send join/prune based on the changed RPF.
The join and prune packets are not aggregated into a single packet.

Root Cause Analysis:
====================
On RP change, pim_upstream_update() gets called for the selected
upstreams. This API determines whom to send a join to and whom to send
a prune to via pim_zebra_upstream_rpf_changed(), which prepares the
per-interface upstream_switch_list and inserts the group and sources
into it.
At this point PIM is still in the pim_upstream_update() context, i.e.
it is still processing that single upstream. At the end of the API
there is a call to pim_zebra_update_all_interfaces(), which processes
the upstream_switch_list, sends the packets out and clears the list.
As a result the list is built and flushed once per upstream, so each
(S,G) goes out in its own join/prune packet.
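
A simplified sketch of the old call flow (illustrative only; the RPF
recomputation is abbreviated and the condition name rpf_changed is a
placeholder, not the exact FRR code):

    void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
    {
            struct pim_rpf old_rpf = up->rpf;

            /* ... recompute the RPF neighbor for this one upstream ... */

            if (rpf_changed)
                    /* add this upstream's group/source to the per-interface
                     * upstream_switch_list (join toward the new RPF neighbor,
                     * prune toward the old one) */
                    pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

            /* still in the single-upstream context: flush the list right away,
             * i.e. send the join/prune packet and free the list entries */
            pim_zebra_update_all_interfaces(pim);
    }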

Fix:
====
Don't process the upstream_switch_list in the per-upstream context.
Process all the upstreams first, preparing the upstream_switch_list,
and then process the list in one go. This clubs all the (S,G) entries
into aggregated join/prune packets.
It also avoids cleaning up the list for every upstream, i.e. repeated
memory allocation and deallocation. A sketch of the new pattern follows.
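
Illustrative sketch of the pattern now used in the callers
(pim_rp_new/pim_rp_del/pim_rp_change/pim_bsm_clear); the iteration
macro frr_each(rb_pim_upstream, ...) is assumed here, the other names
follow the diff below:

    bool upstream_updated = false;

    frr_each (rb_pim_upstream, &pim->upstream_head, up) {
            /* ... find the RP info that now matches up->sg.grp ... */
            if (trp_info == rp_info) {
                    /* only queues the (S,G) on the upstream_switch_list now,
                     * no longer sends the packet itself */
                    pim_upstream_update(pim, up);
                    upstream_updated = true;
            }
    }

    if (upstream_updated)
            /* single pass: one aggregated join/prune per interface */
            pim_zebra_update_all_interfaces(pim);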

Signed-off-by: Vishal Dhingra <rac.vishaldhingra@gmail.com>
Signed-off-by: Mobashshera Rasool <mrasool@vmware.com>
Mobashshera Rasool 2021-11-05 00:11:57 -07:00
parent 3c52293809
commit 62596d9a1f
2 changed files with 32 additions and 5 deletions

pimd/pim_bsm.c

@@ -35,6 +35,7 @@
#include "pim_nht.h"
#include "pim_bsm.h"
#include "pim_time.h"
#include "pim_zebra.h"
/* Functions forward declaration */
static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
@@ -579,6 +580,7 @@ void pim_bsm_clear(struct pim_instance *pim)
struct rp_info *rp_all;
struct pim_upstream *up;
struct rp_info *rp_info;
bool upstream_updated = false;
if (pim->global_scope.current_bsr.s_addr)
pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
@@ -681,8 +683,12 @@ void pim_bsm_clear(struct pim_instance *pim)
} else {
/* RP found for the group grp */
pim_upstream_update(pim, up);
upstream_updated = true;
}
}
if (upstream_updated)
pim_zebra_update_all_interfaces(pim);
}
static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,

pimd/pim_rp.c

@@ -413,7 +413,6 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
old_rpf.source_nexthop.interface))
pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);
pim_zebra_update_all_interfaces(pim);
}
int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
@@ -431,6 +430,7 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
struct prefix nht_p;
struct route_node *rn;
struct pim_upstream *up;
bool upstream_updated = false;
if (rp_addr.s_addr == INADDR_ANY ||
rp_addr.s_addr == INADDR_NONE)
@@ -547,10 +547,14 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
grp.u.prefix4 = up->sg.grp;
trp_info = pim_rp_find_match_group(
pim, &grp);
if (trp_info == rp_all)
if (trp_info == rp_all) {
pim_upstream_update(pim, up);
upstream_updated = true;
}
}
}
if (upstream_updated)
pim_zebra_update_all_interfaces(pim);
pim_rp_check_interfaces(pim, rp_all);
pim_rp_refresh_group_to_rp_mapping(pim);
@@ -634,11 +638,16 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
grp.u.prefix4 = up->sg.grp;
trp_info = pim_rp_find_match_group(pim, &grp);
if (trp_info == rp_info)
if (trp_info == rp_info) {
pim_upstream_update(pim, up);
upstream_updated = true;
}
}
}
if (upstream_updated)
pim_zebra_update_all_interfaces(pim);
pim_rp_check_interfaces(pim, rp_info);
pim_rp_refresh_group_to_rp_mapping(pim);
@@ -695,6 +704,7 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
struct bsgrp_node *bsgrp = NULL;
struct bsm_rpinfo *bsrp = NULL;
char rp_str[INET_ADDRSTRLEN];
bool upstream_updated = false;
if (!inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str)))
snprintf(rp_str, sizeof(rp_str), "<rp?>");
@@ -837,11 +847,16 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
}
/* RP found for the group grp */
else
else {
pim_upstream_update(pim, up);
upstream_updated = true;
}
}
}
if (upstream_updated)
pim_zebra_update_all_interfaces(pim);
XFREE(MTYPE_PIM_RP, rp_info);
return PIM_SUCCESS;
}
@@ -854,6 +869,7 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
int result = 0;
struct rp_info *rp_info = NULL;
struct pim_upstream *up;
bool upstream_updated = false;
rn = route_node_lookup(pim->rp_table, &group);
if (!rn) {
@@ -908,11 +924,16 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
grp.u.prefix4 = up->sg.grp;
trp_info = pim_rp_find_match_group(pim, &grp);
if (trp_info == rp_info)
if (trp_info == rp_info) {
pim_upstream_update(pim, up);
upstream_updated = true;
}
}
}
if (upstream_updated)
pim_zebra_update_all_interfaces(pim);
/* Register new RP addr with Zebra NHT */
nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
if (PIM_DEBUG_PIM_NHT_RP)