From bf2f7839455a7a820ca015c86b08fe276e888f14 Mon Sep 17 00:00:00 2001
From: Duncan Eastoe
Date: Fri, 18 Dec 2020 14:21:18 +0000
Subject: [PATCH] zebra: reduce atomic ops in fpm_nl_process()

Maintain the peak ctxqueue length in a local variable, and perform a
single atomic update after processing all contexts. Generally this
results in at least one less atomic operation per context.

Signed-off-by: Duncan Eastoe
---
 zebra/dplane_fpm_nl.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index bd7b604bad..5133258eb3 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -1421,7 +1421,7 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov)
 	struct zebra_dplane_ctx *ctx;
 	struct fpm_nl_ctx *fnc;
 	int counter, limit;
-	uint64_t cur_queue, peak_queue;
+	uint64_t cur_queue, peak_queue = 0, stored_peak_queue;
 
 	fnc = dplane_provider_get_data(prov);
 	limit = dplane_provider_get_work_limit(prov);
@@ -1449,13 +1449,8 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov)
 			cur_queue = atomic_load_explicit(
 				&fnc->counters.ctxqueue_len,
 				memory_order_relaxed);
-			peak_queue = atomic_load_explicit(
-				&fnc->counters.ctxqueue_len_peak,
-				memory_order_relaxed);
 			if (peak_queue < cur_queue)
-				atomic_store_explicit(
-					&fnc->counters.ctxqueue_len_peak,
-					cur_queue, memory_order_relaxed);
+				peak_queue = cur_queue;
 			continue;
 		}
 
@@ -1463,6 +1458,13 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov)
 		dplane_provider_enqueue_out_ctx(prov, ctx);
 	}
 
+	/* Update peak queue length, if we just observed a new peak */
+	stored_peak_queue = atomic_load_explicit(
+		&fnc->counters.ctxqueue_len_peak, memory_order_relaxed);
+	if (stored_peak_queue < peak_queue)
+		atomic_store_explicit(&fnc->counters.ctxqueue_len_peak,
+				      peak_queue, memory_order_relaxed);
+
 	if (atomic_load_explicit(&fnc->counters.ctxqueue_len,
 				 memory_order_relaxed) > 0)
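
Note (illustration only, not part of the applied patch): the sketch below shows the
same "track the peak locally, publish once" pattern in isolation, using C11
<stdatomic.h> and memory_order_relaxed as in the patch. The names (queue_counters,
process_items, len, len_peak) are hypothetical stand-ins for the FPM counters
touched above, not the actual FRR structures.

#include <stdatomic.h>
#include <stdint.h>

struct queue_counters {
	atomic_uint_fast64_t len;	/* current queue length, updated elsewhere */
	atomic_uint_fast64_t len_peak;	/* observed peak queue length */
};

/*
 * Process up to 'limit' items. The peak is tracked in a plain local
 * variable inside the loop; len_peak is read and (possibly) written
 * atomically only once, after the loop.
 */
static void process_items(struct queue_counters *c, int limit)
{
	uint64_t cur, peak = 0, stored_peak;
	int i;

	for (i = 0; i < limit; i++) {
		/* ... dequeue and handle one item here ... */

		cur = atomic_load_explicit(&c->len, memory_order_relaxed);
		if (peak < cur)
			peak = cur;	/* local max: no atomics on len_peak per item */
	}

	/* Single atomic read, and a store only if a new peak was seen. */
	stored_peak = atomic_load_explicit(&c->len_peak,
					   memory_order_relaxed);
	if (stored_peak < peak)
		atomic_store_explicit(&c->len_peak, peak,
				      memory_order_relaxed);
}

Compared with reading and conditionally storing len_peak on every iteration, this
saves at least one atomic load per processed item, which is the saving described
in the commit message.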