Merge pull request #3045 from opensourcerouting/atoms
READY: lists/skiplists/rb-trees new API & sequence lock & atomic lists
commit 31e944a8a7
@@ -241,9 +241,9 @@ void bgp_sync_init(struct peer *peer)
 	FOREACH_AFI_SAFI (afi, safi) {
 		sync = XCALLOC(MTYPE_BGP_SYNCHRONISE,
 			       sizeof(struct bgp_synchronize));
-		BGP_ADV_FIFO_INIT(&sync->update);
-		BGP_ADV_FIFO_INIT(&sync->withdraw);
-		BGP_ADV_FIFO_INIT(&sync->withdraw_low);
+		bgp_adv_fifo_init(&sync->update);
+		bgp_adv_fifo_init(&sync->withdraw);
+		bgp_adv_fifo_init(&sync->withdraw_low);
 		peer->sync[afi][safi] = sync;
 	}
 }
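
The conversion pattern above repeats throughout this PR: hand-rolled FIFO
macros carrying an explicit count field are replaced by functions generated
from a single DECLARE_LIST invocation. A minimal sketch of the resulting
queue API (hypothetical usage, not part of the diff; bgp_advertise_free()
is the assumed cleanup function):

    struct bgp_adv_fifo_head q;
    struct bgp_advertise *adv; /* assumed: an already-allocated advertisement */

    bgp_adv_fifo_init(&q);
    bgp_adv_fifo_add_tail(&q, adv);      /* O(1) append */
    while ((adv = bgp_adv_fifo_pop(&q))) /* drain in FIFO order */
        bgp_advertise_free(adv);
    bgp_adv_fifo_fini(&q);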
@@ -21,17 +21,12 @@
 #ifndef _QUAGGA_BGP_ADVERTISE_H
 #define _QUAGGA_BGP_ADVERTISE_H
 
-#include <lib/fifo.h>
+#include "lib/typesafe.h"
+
+PREDECL_LIST(bgp_adv_fifo)
 
 struct update_subgroup;
 
-/* BGP advertise FIFO. */
-struct bgp_advertise_fifo {
-	struct bgp_advertise *next;
-	struct bgp_advertise *prev;
-	uint32_t count;
-};
-
 /* BGP advertise attribute. */
 struct bgp_advertise_attr {
 	/* Head of advertisement pointer. */
@@ -46,7 +41,7 @@ struct bgp_advertise_attr {
 
 struct bgp_advertise {
 	/* FIFO for advertisement. */
-	struct bgp_advertise_fifo fifo;
+	struct bgp_adv_fifo_item fifo;
 
 	/* Link list for same attribute advertise. */
 	struct bgp_advertise *next;
@@ -65,6 +60,8 @@ struct bgp_advertise {
 	struct bgp_path_info *pathi;
 };
 
+DECLARE_LIST(bgp_adv_fifo, struct bgp_advertise, fifo)
+
 /* BGP adjacency out. */
 struct bgp_adj_out {
 	/* RB Tree of adjacency entries */
@@ -110,9 +107,9 @@ struct bgp_adj_in {
 
 /* BGP advertisement list. */
 struct bgp_synchronize {
-	struct bgp_advertise_fifo update;
-	struct bgp_advertise_fifo withdraw;
-	struct bgp_advertise_fifo withdraw_low;
+	struct bgp_adv_fifo_head update;
+	struct bgp_adv_fifo_head withdraw;
+	struct bgp_adv_fifo_head withdraw_low;
 };
 
 /* BGP adjacency linked list. */
@@ -138,36 +135,6 @@ struct bgp_synchronize {
 #define BGP_ADJ_IN_ADD(N, A) BGP_PATH_INFO_ADD(N, A, adj_in)
 #define BGP_ADJ_IN_DEL(N, A) BGP_PATH_INFO_DEL(N, A, adj_in)
 
-#define BGP_ADV_FIFO_ADD(F, N) \
-	do { \
-		FIFO_ADD((F), (N)); \
-		(F)->count++; \
-	} while (0)
-
-#define BGP_ADV_FIFO_DEL(F, N) \
-	do { \
-		FIFO_DEL((N)); \
-		(F)->count--; \
-	} while (0)
-
-#define BGP_ADV_FIFO_INIT(F) \
-	do { \
-		FIFO_INIT((F)); \
-		(F)->count = 0; \
-	} while (0)
-
-#define BGP_ADV_FIFO_COUNT(F) (F)->count
-
-#define BGP_ADV_FIFO_EMPTY(F) \
-	(((struct bgp_advertise_fifo *)(F))->next \
-	 == (struct bgp_advertise *)(F))
-
-#define BGP_ADV_FIFO_HEAD(F) \
-	((((struct bgp_advertise_fifo *)(F))->next \
-	  == (struct bgp_advertise *)(F)) \
-		 ? NULL \
-		 : (F)->next)
-
 /* Prototypes. */
 extern int bgp_adj_out_lookup(struct peer *, struct bgp_node *, uint32_t);
 extern void bgp_adj_in_set(struct bgp_node *, struct peer *, struct attr *,
@@ -25,7 +25,6 @@
 #include "stream.h"
 #include "mpls.h"
 #include "vty.h"
-#include "fifo.h"
 #include "linklist.h"
 #include "skiplist.h"
 #include "workqueue.h"
@@ -50,34 +49,10 @@ static struct labelpool *lp
 #define LP_CHUNK_SIZE 50
 
 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
-DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO")
+DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")
 
-#define LABEL_FIFO_ADD(F, N) \
-	do { \
-		FIFO_ADD((F), (N)); \
-		(F)->count++; \
-	} while (0)
-
-#define LABEL_FIFO_DEL(F, N) \
-	do { \
-		FIFO_DEL((N)); \
-		(F)->count--; \
-	} while (0)
-
-#define LABEL_FIFO_INIT(F) \
-	do { \
-		FIFO_INIT((F)); \
-		(F)->count = 0; \
-	} while (0)
-
-#define LABEL_FIFO_COUNT(F) ((F)->count)
-
-#define LABEL_FIFO_EMPTY(F) FIFO_EMPTY(F)
-
-#define LABEL_FIFO_HEAD(F) ((F)->next == (F) ? NULL : (F)->next)
-
 struct lp_chunk {
 	uint32_t first;
 	uint32_t last;
@@ -98,15 +73,13 @@ struct lp_lcb {
 	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
 };
 
-/* XXX same first elements as "struct fifo" */
 struct lp_fifo {
-	struct lp_fifo *next;
-	struct lp_fifo *prev;
-
-	uint32_t count;
+	struct lp_fifo_item fifo;
 	struct lp_lcb lcb;
 };
 
+DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)
+
 struct lp_cbq_item {
 	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
 	int type;
@@ -199,8 +172,7 @@ void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
 	lp->inuse = skiplist_new(0, NULL, NULL);
 	lp->chunks = list_new();
 	lp->chunks->del = lp_chunk_free;
-	lp->requests = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
-	LABEL_FIFO_INIT(lp->requests);
+	lp_fifo_init(&lp->requests);
 	lp->callback_q = work_queue_new(master, "label callbacks");
 
 	lp->callback_q->spec.workfunc = lp_cbq_docallback;
@@ -223,13 +195,9 @@ void bgp_lp_finish(void)
 
 	list_delete(&lp->chunks);
 
-	while ((lf = LABEL_FIFO_HEAD(lp->requests))) {
-
-		LABEL_FIFO_DEL(lp->requests, lf);
+	while ((lf = lp_fifo_pop(&lp->requests)))
 		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
-	}
-	XFREE(MTYPE_BGP_LABEL_FIFO, lp->requests);
-	lp->requests = NULL;
+	lp_fifo_fini(&lp->requests);
 
 	work_queue_free_and_null(&lp->callback_q);
 
@@ -385,9 +353,9 @@ void bgp_lp_get(
 		sizeof(struct lp_fifo));
 
 	lf->lcb = *lcb;
-	LABEL_FIFO_ADD(lp->requests, lf);
+	lp_fifo_add_tail(&lp->requests, lf);
 
-	if (LABEL_FIFO_COUNT(lp->requests) > lp->pending_count) {
+	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
 		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
 			lp->pending_count += LP_CHUNK_SIZE;
 			return;
@@ -441,11 +409,11 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
 	lp->pending_count -= (last - first + 1);
 
 	if (debug) {
-		zlog_debug("%s: %u pending requests", __func__,
-			LABEL_FIFO_COUNT(lp->requests));
+		zlog_debug("%s: %zu pending requests", __func__,
+			lp_fifo_count(&lp->requests));
 	}
 
-	while ((lf = LABEL_FIFO_HEAD(lp->requests))) {
+	while ((lf = lp_fifo_first(&lp->requests))) {
 
 		struct lp_lcb *lcb;
 		void *labelid = lf->lcb.labelid;
@@ -504,7 +472,7 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
 		work_queue_add(lp->callback_q, q);
 
 finishedrequest:
-		LABEL_FIFO_DEL(lp->requests, lf);
+		lp_fifo_del(&lp->requests, lf);
 		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
 	}
 }
@@ -533,7 +501,7 @@ void bgp_lp_event_zebra_up(void)
 	/*
 	 * Get label chunk allocation request dispatched to zebra
	 */
-	labels_needed = LABEL_FIFO_COUNT(lp->requests) +
+	labels_needed = lp_fifo_count(&lp->requests) +
 		skiplist_count(lp->inuse);
 
 	/* round up */
@@ -588,7 +556,7 @@ void bgp_lp_event_zebra_up(void)
 			sizeof(struct lp_fifo));
 
 		lf->lcb = *lcb;
-		LABEL_FIFO_ADD(lp->requests, lf);
+		lp_fifo_add_tail(&lp->requests, lf);
 	}
 
 	skiplist_delete_first(lp->inuse);
@@ -31,11 +31,13 @@
 #define LP_TYPE_VRF	0x00000001
 #define LP_TYPE_BGP_LU	0x00000002
 
+PREDECL_LIST(lp_fifo)
+
 struct labelpool {
 	struct skiplist *ledger;	/* all requests */
 	struct skiplist *inuse;		/* individual labels */
 	struct list *chunks;		/* granted by zebra */
-	struct lp_fifo *requests;	/* blocked on zebra */
+	struct lp_fifo_head requests;	/* blocked on zebra */
 	struct work_queue *callback_q;
 	uint32_t pending_count;		/* requested from zebra */
 };
@@ -83,9 +83,9 @@ static void sync_init(struct update_subgroup *subgrp)
 {
 	subgrp->sync =
 		XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
-	BGP_ADV_FIFO_INIT(&subgrp->sync->update);
-	BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw);
-	BGP_ADV_FIFO_INIT(&subgrp->sync->withdraw_low);
+	bgp_adv_fifo_init(&subgrp->sync->update);
+	bgp_adv_fifo_init(&subgrp->sync->withdraw);
+	bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
 	subgrp->hash =
 		hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
 
@@ -590,9 +590,9 @@ static inline void bgp_announce_peer(struct peer *peer)
  */
 static inline int advertise_list_is_empty(struct update_subgroup *subgrp)
 {
-	if (!BGP_ADV_FIFO_EMPTY(&subgrp->sync->update)
-	    || !BGP_ADV_FIFO_EMPTY(&subgrp->sync->withdraw)
-	    || !BGP_ADV_FIFO_EMPTY(&subgrp->sync->withdraw_low)) {
+	if (bgp_adv_fifo_count(&subgrp->sync->update)
+	    || bgp_adv_fifo_count(&subgrp->sync->withdraw)
+	    || bgp_adv_fifo_count(&subgrp->sync->withdraw_low)) {
 		return 0;
 	}
 
@@ -422,7 +422,7 @@ bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
 	struct bgp_advertise *adv;
 	struct bgp_advertise_attr *baa;
 	struct bgp_advertise *next;
-	struct bgp_advertise_fifo *fhead;
+	struct bgp_adv_fifo_head *fhead;
 
 	adv = adj->adv;
 	baa = adv->baa;
@@ -444,7 +444,7 @@ bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
 
 
 	/* Unlink myself from advertisement FIFO. */
-	BGP_ADV_FIFO_DEL(fhead, adv);
+	bgp_adv_fifo_del(fhead, adv);
 
 	/* Free memory. */
 	bgp_advertise_free(adj->adv);
@@ -507,7 +507,7 @@ void bgp_adj_out_set_subgroup(struct bgp_node *rn,
 	 * If the update adv list is empty, trigger the member peers'
 	 * mrai timers so the socket writes can happen.
 	 */
-	if (BGP_ADV_FIFO_EMPTY(&subgrp->sync->update)) {
+	if (!bgp_adv_fifo_count(&subgrp->sync->update)) {
 		struct peer_af *paf;
 
 		SUBGRP_FOREACH_PEER (subgrp, paf) {
@@ -515,7 +515,7 @@ void bgp_adj_out_set_subgroup(struct bgp_node *rn,
 		}
 	}
 
-	BGP_ADV_FIFO_ADD(&subgrp->sync->update, &adv->fifo);
+	bgp_adv_fifo_add_tail(&subgrp->sync->update, adv);
 
 	subgrp->version = max(subgrp->version, rn->version);
 }
@@ -550,11 +550,11 @@ void bgp_adj_out_unset_subgroup(struct bgp_node *rn,
 
 			/* Note if we need to trigger a packet write */
 			trigger_write =
-				BGP_ADV_FIFO_EMPTY(&subgrp->sync->withdraw);
+				!bgp_adv_fifo_count(&subgrp->sync->withdraw);
 
 			/* Add to synchronization entry for withdraw
 			 * announcement. */
-			BGP_ADV_FIFO_ADD(&subgrp->sync->withdraw, &adv->fifo);
+			bgp_adv_fifo_add_tail(&subgrp->sync->withdraw, adv);
 
 			if (trigger_write)
 				subgroup_trigger_write(subgrp);
@@ -664,11 +664,11 @@ int subgroup_packets_to_build(struct update_subgroup *subgrp)
 	if (!subgrp)
 		return 0;
 
-	adv = BGP_ADV_FIFO_HEAD(&subgrp->sync->withdraw);
+	adv = bgp_adv_fifo_first(&subgrp->sync->withdraw);
 	if (adv)
 		return 1;
 
-	adv = BGP_ADV_FIFO_HEAD(&subgrp->sync->update);
+	adv = bgp_adv_fifo_first(&subgrp->sync->update);
 	if (adv)
 		return 1;
 
@@ -725,7 +725,7 @@ struct bpacket *subgroup_update_packet(struct update_subgroup *subgrp)
 	addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
 	addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
 
-	adv = BGP_ADV_FIFO_HEAD(&subgrp->sync->update);
+	adv = bgp_adv_fifo_first(&subgrp->sync->update);
 	while (adv) {
 		assert(adv->rn);
 		rn = adv->rn;
@@ -966,7 +966,7 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp)
 	addpath_encode = bgp_addpath_encode_tx(peer, afi, safi);
 	addpath_overhead = addpath_encode ? BGP_ADDPATH_ID_LEN : 0;
 
-	while ((adv = BGP_ADV_FIFO_HEAD(&subgrp->sync->withdraw)) != NULL) {
+	while ((adv = bgp_adv_fifo_first(&subgrp->sync->withdraw)) != NULL) {
 		assert(adv->rn);
 		adj = adv->adj;
 		rn = adv->rn;
configure.ac

@@ -926,6 +926,80 @@ AC_CHECK_HEADERS([pthread_np.h],,, [
 ])
 AC_CHECK_FUNCS([pthread_setname_np pthread_set_name_np])
 
+needsync=true
+
+AS_IF([$needsync], [
+  dnl Linux
+  AC_MSG_CHECKING([for Linux futex() support])
+  AC_LINK_IFELSE([AC_LANG_PROGRAM([
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <unistd.h>
+#include <limits.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <linux/futex.h>
+
+int main(void);
+],
+[
+{
+	return syscall(SYS_futex, NULL, FUTEX_WAIT, 0, NULL, NULL, 0);
+}
+])], [
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(HAVE_SYNC_LINUX_FUTEX,,Have Linux futex support)
+    needsync=false
+  ], [
+    AC_MSG_RESULT([no])
+  ])
+])
+
+AS_IF([$needsync], [
+  dnl FreeBSD
+  AC_MSG_CHECKING([for FreeBSD _umtx_op() support])
+  AC_LINK_IFELSE([AC_LANG_PROGRAM([
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/umtx.h>
+int main(void);
+],
+[
+{
+	return _umtx_op(NULL, UMTX_OP_WAIT_UINT, 0, NULL, NULL);
+}
+])], [
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(HAVE_SYNC_UMTX_OP,,Have FreeBSD _umtx_op() support)
+    needsync=false
+  ], [
+    AC_MSG_RESULT([no])
+  ])
+])
+
+AS_IF([$needsync], [
+  dnl OpenBSD patch (not upstream at the time of writing this)
+  dnl https://marc.info/?l=openbsd-tech&m=147299508409549&w=2
+  AC_MSG_CHECKING([for OpenBSD futex() support])
+  AC_LINK_IFELSE([AC_LANG_PROGRAM([
+#include <sys/futex.h>
+int main(void);
+],
+[
+{
+	return futex(NULL, FUTEX_WAIT, 0, NULL, NULL, 0);
+}
+])], [
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(HAVE_SYNC_OPENBSD_FUTEX,,Have OpenBSD futex support)
+    needsync=false
+  ], [
+    AC_MSG_RESULT([no])
+  ])
+])
+
 dnl Utility macro to avoid retyping includes all the time
 m4_define([FRR_INCLUDES],
 [#ifdef SUNOS_5
debian/copyright

@@ -324,19 +324,6 @@ Copyright:
   Copyright (c) 2006, 2007 Pierre-Yves Ritschard <pyr@openbsd.org>
   Copyright (c) 2006, 2007, 2008 Reyk Floeter <reyk@openbsd.org>
 
-Files: isisd/dict.*
-Copyright: Copyright (C) 1997 Kaz Kylheku <kaz@ashi.footprints.net>
-License: custom-BSD-like
- All rights are reserved by the author, with the following exceptions:
- Permission is granted to freely reproduce and distribute this software,
- possibly in exchange for a fee, provided that this copyright notice appears
- intact. Permission is also granted to adapt this software to produce
- derivative works, as long as the modified versions carry this copyright
- notice and additional notices stating that the work has been modified.
- This source code may be translated into executable form and incorporated
- into proprietary software; there is no requirement for such software to
- contain a copyright notice related to this source.
-
 Files: qpb/qpb.proto fpm/fpm.proto
 License: ISC
 Copyright: Copyright (C) 2016 Sproute Networks, Inc.
@@ -7,8 +7,9 @@ Library Facilities (libfrr)
 .. toctree::
    :maxdepth: 2
 
-   logging
    memtypes
+   lists
+   logging
    hooks
    cli
    modules
doc/developer/lists.rst (new file, 594 lines)

@@ -0,0 +1,594 @@
List implementations
====================

.. note::

   The term *list* is used generically for lists, skiplists, trees and hash
   tables in this document.

Common list interface
---------------------

FRR includes a set of list-like data structure implementations with
abstracted common APIs.  The purpose of this is to make it easy to swap out
one data structure for another while also making the code easier to read
and write.  There is one API for unsorted lists and a similar but not
identical API for sorted lists.

For unsorted lists, the following implementations exist:

- single-linked list with tail pointer (e.g. STAILQ in BSD)

- atomic single-linked list with tail pointer


For sorted lists, these data structures are implemented:

- single-linked list

- atomic single-linked list

- skiplist

- red-black tree (based on OpenBSD RB_TREE)

- hash table (note below)

Except for hash tables, each of the sorted data structures has a variant
with unique and non-unique list items.  Hash tables always require unique
items and mostly follow the "sorted" API but use the hash value as sorting
key.  Also, iterating while modifying does not work with hash tables.


The following sorted structures are likely to be implemented at some point
in the future:

- atomic skiplist

- atomic hash table (note below)


The APIs are all designed to be as type-safe as possible.  This means that
there will be a compiler warning when an item doesn't match the list, or
the return value has a different type, or other similar situations.  **You
should never use casts with these APIs.**  If a cast is necessary in
relation to these APIs, there is probably something wrong with the overall
design.

Only the following pieces use dynamically allocated memory:

- the hash table itself is dynamically grown and shrunk

- skiplists store up to 4 next pointers inline but will dynamically
  allocate memory to hold an item's 5th up to 16th next pointer (if they
  exist)

Cheat sheet
-----------

Available types:

::

   DECLARE_LIST
   DECLARE_ATOMLIST

   DECLARE_SORTLIST_UNIQ
   DECLARE_SORTLIST_NONUNIQ
   DECLARE_ATOMLIST_UNIQ
   DECLARE_ATOMLIST_NONUNIQ
   DECLARE_SKIPLIST_UNIQ
   DECLARE_SKIPLIST_NONUNIQ
   DECLARE_RBTREE_UNIQ
   DECLARE_RBTREE_NONUNIQ

   DECLARE_HASH

Functions provided:

+------------------------------------+------+------+---------+------------+
| Function                           | LIST | HASH | \*_UNIQ | \*_NONUNIQ |
+====================================+======+======+=========+============+
| _init, _fini                       | yes  | yes  | yes     | yes        |
+------------------------------------+------+------+---------+------------+
| _first, _next, _next_safe          | yes  | yes  | yes     | yes        |
+------------------------------------+------+------+---------+------------+
| _add_head, _add_tail, _add_after   | yes  | --   | --      | --         |
+------------------------------------+------+------+---------+------------+
| _add                               | --   | yes  | yes     | yes        |
+------------------------------------+------+------+---------+------------+
| _del, _pop                         | yes  | yes  | yes     | yes        |
+------------------------------------+------+------+---------+------------+
| _find                              | --   | yes  | yes     | --         |
+------------------------------------+------+------+---------+------------+
| _find_lt, _find_gteq               | --   | --   | yes     | yes        |
+------------------------------------+------+------+---------+------------+
| use with for_each() macros         | yes  | yes  | yes     | yes        |
+------------------------------------+------+------+---------+------------+

Datastructure type setup
------------------------

Each of the data structures has a ``PREDECL_*`` and a ``DECLARE_*`` macro
to set up an "instantiation" of the list.  This works somewhat similarly to
C++ templating, though much simpler.

**In all following text, the Z prefix is replaced with a name chosen
for the instance of the datastructure.**

The common setup pattern will look like this:

.. code-block:: c

   PREDECL_XXX(Z)
   struct item {
       int otherdata;
       struct Z_item mylistitem;
   }

   struct Z_head mylisthead;

   /* unsorted: */
   DECLARE_XXX(Z, struct item, mylistitem)

   /* sorted, items that compare as equal cannot be added to list */
   int compare_func(const struct item *a, const struct item *b);
   DECLARE_XXX_UNIQ(Z, struct item, mylistitem, compare_func)

   /* sorted, items that compare as equal can be added to list */
   int compare_func(const struct item *a, const struct item *b);
   DECLARE_XXX_NONUNIQ(Z, struct item, mylistitem, compare_func)

   /* hash tables: */
   int compare_func(const struct item *a, const struct item *b);
   uint32_t hash_func(const struct item *a);
   DECLARE_XXX(Z, struct item, mylistitem, compare_func, hash_func)

``XXX`` is replaced with the name of the data structure, e.g. ``SKIPLIST``
or ``ATOMLIST``.  The ``DECLARE_XXX`` invocation can either occur in a `.h`
file (if the list needs to be accessed from several C files) or it can be
placed in a `.c` file (if the list is only accessed from that file.)  The
``PREDECL_XXX`` invocation defines the ``struct Z_item`` and ``struct
Z_head`` types and must therefore occur before these are used.

To switch between compatible data structures, only these two lines need to
be changed.  To switch to a data structure with a different API, some
source changes are necessary.
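
As a concrete illustration, an unsorted ``LIST`` instantiation and basic
use might look like this (a hypothetical sketch; ``eventq``, ``struct
event``, ``do_something()`` and ``handle()`` are invented names):

.. code-block:: c

   PREDECL_LIST(eventq)

   struct event {
       struct eventq_item qitem; /* embedded list node, no extra allocation */
       int code;
   };

   DECLARE_LIST(eventq, struct event, qitem)

   static struct eventq_head queue;

   void demo(struct event *ev)
   {
       struct event *e;

       eventq_init(&queue);
       eventq_add_tail(&queue, ev);     /* O(1) append */

       for_each (eventq, &queue, e)     /* typesafe iteration */
           do_something(e);

       while ((e = eventq_pop(&queue))) /* drain the queue */
           handle(e);
       eventq_fini(&queue);
   }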

Common iteration macros
-----------------------

The following iteration macros work across all data structures:

.. c:function:: for_each(Z, head, item)

   Equivalent to:

   .. code-block:: c

      for (item = Z_first(head); item; item = Z_next(head, item))

   Note that this will fail if the list is modified while being iterated
   over.

.. c:function:: for_each_safe(Z, head, item)

   Same as the previous, but the next element is pre-loaded into a "hidden"
   variable (named ``Z_safe``.)  Equivalent to:

   .. code-block:: c

      for (item = Z_first(head); item; item = next) {
          next = Z_next_safe(head, item);
          ...
      }

   .. warning::

      Iterating over hash tables while adding or removing items is not
      possible.  The iteration position will be corrupted when the hash
      table is resized while iterating.  This will cause items to be
      skipped or iterated over twice.

.. c:function:: for_each_from(Z, head, item, from)

   Iterates over the list, starting at item ``from``.  This variant is
   "safe" as in the previous macro.  Equivalent to:

   .. code-block:: c

      for (item = from; item; item = from) {
          from = Z_next_safe(head, item);
          ...
      }

   .. note::

      The ``from`` variable is written to.  This is intentional - you can
      resume iteration after breaking out of the loop by keeping the
      ``from`` value persistent and reusing it for the next loop.
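
For example, a sketch of budget-limited, resumable processing built on this
property (``process()`` and the budget logic are illustrative assumptions):

.. code-block:: c

   /* process at most `budget` items per call, resuming where we left off;
    * `cursor` persists across calls and becomes NULL at the end */
   static struct item *cursor;

   void process_some(struct Z_head *head, unsigned budget)
   {
       struct item *item;

       if (!cursor)
           cursor = Z_first(head);

       for_each_from (Z, head, item, cursor) {
           process(item); /* hypothetical per-item work */
           if (--budget == 0)
               break; /* cursor already holds the next item */
       }
   }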

Common API
----------

The following documentation assumes that a list has been defined using
``Z`` as the name, and ``itemtype`` being the type of the list items (e.g.
``struct item``.)

.. c:function:: void Z_init(struct Z_head *)

   Initializes the list for use.  For most implementations, this just sets
   some values.  Hash tables are the only implementation that allocates
   memory in this call.

.. c:function:: void Z_fini(struct Z_head *)

   Reverse the effects of :c:func:`Z_init()`.  The list must be empty
   when this function is called.

   .. warning::

      This function may ``assert()`` if the list is not empty.

.. c:function:: size_t Z_count(struct Z_head *)

   Returns the number of items in a structure.  All structures store a
   counter in their `Z_head` so that calling this function completes
   in O(1).

   .. note::

      For atomic lists with concurrent access, the value will already be
      outdated by the time this function returns and can therefore only be
      used as an estimate.

.. c:function:: itemtype *Z_first(struct Z_head *)

   Returns the first item in the structure, or ``NULL`` if the structure is
   empty.  This is O(1) for all data structures except red-black trees
   where it is O(log n).

.. c:function:: itemtype *Z_pop(struct Z_head *)

   Remove and return the first item in the structure, or ``NULL`` if the
   structure is empty.  Like :c:func:`Z_first`, this is O(1) for all
   data structures except red-black trees where it is O(log n) again.

   This function can be used to build queues (with unsorted structures) or
   priority queues (with sorted structures.)

   Another common pattern is deleting all list items:

   .. code-block:: c

      while ((item = Z_pop(head)))
          item_free(item);

   .. note::

      This function can - and should - be used with hash tables.  It is not
      affected by the "modification while iterating" problem.  To remove
      all items from a hash table, use the loop demonstrated above.

.. c:function:: itemtype *Z_next(struct Z_head *, itemtype *prev)

   Return the item that follows after ``prev``, or ``NULL`` if ``prev`` is
   the last item.

   .. warning::

      ``prev`` must not be ``NULL``!  Use :c:func:`Z_next_safe()` if
      ``prev`` might be ``NULL``.

.. c:function:: itemtype *Z_next_safe(struct Z_head *, itemtype *prev)

   Same as :c:func:`Z_next()`, except that ``NULL`` is returned if
   ``prev`` is ``NULL``.

.. c:function:: itemtype *Z_del(struct Z_head *, itemtype *item)

   Remove ``item`` from the list and return it.

   .. note::

      This function's behaviour is undefined if ``item`` is not actually
      on the list.  Some structures return ``NULL`` in this case while
      others return ``item``.  The function may also call ``assert()``
      (but most don't.)

   .. todo::

      ``Z_del_after()`` / ``Z_del_hint()``?
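
Since removal invalidates the current iteration position, selective
deletion is typically paired with ``for_each_safe`` (a sketch;
``expired()`` is an assumed predicate):

.. code-block:: c

   itemtype *item;

   for_each_safe (Z, head, item) {
       if (expired(item)) {
           Z_del(head, item); /* safe: the next item was pre-loaded */
           item_free(item);
       }
   }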

API for unsorted structures
---------------------------

Since the insertion position is not pre-defined for unsorted data, there
are several functions exposed to insert data:

.. note::

   ``item`` must not be ``NULL`` for any of the following functions.

.. c:function:: DECLARE_XXX(Z, type, field)

   :param listtype XXX: ``LIST`` or ``ATOMLIST`` to select a data structure
      implementation.
   :param token Z: Gives the name prefix that is used for the functions
      created for this instantiation.  ``DECLARE_XXX(foo, ...)``
      gives ``struct foo_item``, ``foo_add_head()``, ``foo_count()``, etc.
      Note that this must match the value given in ``PREDECL_XXX(foo)``.
   :param typename type: Specifies the data type of the list items, e.g.
      ``struct item``.  Note that ``struct`` must be added here, it is not
      automatically added.
   :param token field: References a struct member of ``type`` that must be
      typed as ``struct foo_item``.  This struct member is used to
      store "next" pointers or other data structure specific data.

.. c:function:: void Z_add_head(struct Z_head *, itemtype *item)

   Insert an item at the beginning of the structure, before the first item.
   This is an O(1) operation for non-atomic lists.

.. c:function:: void Z_add_tail(struct Z_head *, itemtype *item)

   Insert an item at the end of the structure, after the last item.
   This is also an O(1) operation for non-atomic lists.

.. c:function:: void Z_add_after(struct Z_head *, itemtype *after, itemtype *item)

   Insert ``item`` behind ``after``.  If ``after`` is ``NULL``, the item is
   inserted at the beginning of the list as with :c:func:`Z_add_head`.
   This is also an O(1) operation for non-atomic lists.

   A common pattern is to keep a "previous" pointer around while iterating:

   .. code-block:: c

      itemtype *prev = NULL, *item;

      for_each_safe (Z, head, item) {
          if (something) {
              Z_add_after(head, prev, item);
              break;
          }
          prev = item;
      }

   .. todo::

      maybe flip the order of ``item`` & ``after``?
      ``Z_add_after(head, item, after)``

API for sorted structures
-------------------------

Sorted data structures do not need to have an insertion position specified,
therefore the insertion calls are different from unsorted lists.  Also,
sorted lists can be searched for a value.

.. c:function:: DECLARE_XXX_UNIQ(Z, type, field, compare_func)

   :param listtype XXX: One of the following:
      ``SORTLIST`` (single-linked sorted list), ``SKIPLIST`` (skiplist),
      ``RBTREE`` (RB-tree) or ``ATOMSORT`` (atomic single-linked list).
   :param token Z: Gives the name prefix that is used for the functions
      created for this instantiation.  ``DECLARE_XXX(foo, ...)``
      gives ``struct foo_item``, ``foo_add()``, ``foo_count()``, etc.  Note
      that this must match the value given in ``PREDECL_XXX(foo)``.
   :param typename type: Specifies the data type of the list items, e.g.
      ``struct item``.  Note that ``struct`` must be added here, it is not
      automatically added.
   :param token field: References a struct member of ``type`` that must be
      typed as ``struct foo_item``.  This struct member is used to
      store "next" pointers or other data structure specific data.
   :param funcptr compare_func: Item comparison function, must have the
      following function signature:
      ``int function(const itemtype *, const itemtype *)``.  This function
      may be static if the list is only used in one file.

.. c:function:: DECLARE_XXX_NONUNIQ(Z, type, field, compare_func)

   Same as above, but allow adding multiple items to the list that compare
   as equal in ``compare_func``.  Ordering between these items is undefined
   and depends on the list implementation.

.. c:function:: itemtype *Z_add(struct Z_head *, itemtype *item)

   Insert an item at the appropriate sorted position.  If another item
   exists in the list that compares as equal (``compare_func()`` == 0),
   ``item`` is not inserted into the list and the already-existing item in
   the list is returned.  Otherwise, on successful insertion, ``NULL`` is
   returned.

   For ``_NONUNIQ`` lists, this function always returns ``NULL`` since
   ``item`` can always be successfully added to the list.
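
The return value makes an "add or reuse existing" pattern straightforward
on ``_UNIQ`` lists (a sketch; ``item_new()`` and ``item_free()`` are
assumed helpers):

.. code-block:: c

   struct item *existing, *new = item_new(key);

   existing = Z_add(head, new);
   if (existing) {
       /* an equal item was already present; keep it, discard ours */
       item_free(new);
       new = existing;
   }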

.. c:function:: itemtype *Z_find(struct Z_head *, const itemtype *ref)

   Search the list for an item that compares equal to ``ref``.  If no equal
   item is found, return ``NULL``.

   This function is likely used with a temporary stack-allocated value for
   ``ref`` like so:

   .. code-block:: c

      itemtype searchfor = { .foo = 123 };

      itemtype *item = Z_find(head, &searchfor);

   .. note::

      The ``Z_find()`` function is only available for lists that contain
      unique items (i.e. ``DECLARE_XXX_UNIQ``.)  This is because on a list
      containing non-unique items, more than one item may compare as equal
      to the item that is searched for.

.. c:function:: itemtype *Z_find_gteq(struct Z_head *, const itemtype *ref)

   Search the list for an item that compares greater or equal to
   ``ref``.  See :c:func:`Z_find()` above.

.. c:function:: itemtype *Z_find_lt(struct Z_head *, const itemtype *ref)

   Search the list for an item that compares less than
   ``ref``.  See :c:func:`Z_find()` above.
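
``Z_find_gteq()`` combined with ``for_each_from()`` gives an ordered range
walk, e.g. visiting all items in ``[lo, hi)`` (a sketch; ``compare_func``
is the list's comparison function and ``visit()`` is assumed):

.. code-block:: c

   itemtype lo = { .foo = 100 }, hi = { .foo = 200 };
   itemtype *start = Z_find_gteq(head, &lo);
   itemtype *item;

   for_each_from (Z, head, item, start) {
       if (compare_func(item, &hi) >= 0)
           break; /* past the end of the range */
       visit(item);
   }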

API for hash tables
-------------------

.. c:function:: DECLARE_XXX(Z, type, field, compare_func, hash_func)

   :param listtype XXX: Only ``HASH`` is currently available.
   :param token Z: Gives the name prefix that is used for the functions
      created for this instantiation.  ``DECLARE_XXX(foo, ...)``
      gives ``struct foo_item``, ``foo_add()``, ``foo_count()``, etc.  Note
      that this must match the value given in ``PREDECL_XXX(foo)``.
   :param typename type: Specifies the data type of the list items, e.g.
      ``struct item``.  Note that ``struct`` must be added here, it is not
      automatically added.
   :param token field: References a struct member of ``type`` that must be
      typed as ``struct foo_item``.  This struct member is used to
      store "next" pointers or other data structure specific data.
   :param funcptr compare_func: Item comparison function, must have the
      following function signature:
      ``int function(const itemtype *, const itemtype *)``.  This function
      may be static if the list is only used in one file.  For hash tables,
      this function is only used to check for equality, the ordering is
      ignored.
   :param funcptr hash_func: Hash calculation function, must have the
      following function signature:
      ``uint32_t function(const itemtype *)``.  The hash value for items
      stored in a hash table is cached in each item, so this value need not
      be cached by the user code.

   .. warning::

      Items that compare as equal cannot be inserted.  Refer to the notes
      about sorted structures in the previous section.

.. c:function:: void Z_init_size(struct Z_head *, size_t size)

   Same as :c:func:`Z_init()` but preset the minimum hash table to
   ``size``.

Hash tables also support :c:func:`Z_add()` and :c:func:`Z_find()` with
the same semantics as noted above.  :c:func:`Z_find_gteq()` and
:c:func:`Z_find_lt()` are **not** provided for hash tables.
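
Putting the pieces together, a complete hash table instantiation might look
like this (a hypothetical sketch; ``nodehash``, ``struct node`` and the
multiplicative hash are invented for illustration):

.. code-block:: c

   PREDECL_HASH(nodehash)

   struct node {
       struct nodehash_item hitem;
       uint32_t id; /* the key */
   };

   static int node_cmp(const struct node *a, const struct node *b)
   {
       /* only equality matters for HASH; the ordering is ignored */
       return (a->id > b->id) - (a->id < b->id);
   }

   static uint32_t node_hash(const struct node *n)
   {
       return n->id * 2654435761u; /* any decent uint32_t hash works */
   }

   DECLARE_HASH(nodehash, struct node, hitem, node_cmp, node_hash)

   static struct nodehash_head nodes; /* nodehash_init(&nodes) at startup */

   struct node *node_get(uint32_t id)
   {
       struct node ref = { .id = id };
       return nodehash_find(&nodes, &ref);
   }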

Atomic lists
------------

`atomlist.h` provides an unsorted and a sorted atomic single-linked list.
Since atomic memory accesses can be considerably slower than plain memory
accesses (depending on the CPU type), these lists should only be used where
necessary.

The following guarantees are provided regarding concurrent access:

- the operations are lock-free but not wait-free.

  Lock-free means that it is impossible for all threads to be blocked.
  Some thread will always make progress, regardless of what other threads
  do.  (This even includes a random thread being stopped by a debugger in
  a random location.)

  Wait-free implies that the time any single thread might spend in one of
  the calls is bounded.  This is not provided here since it is not normally
  relevant to practical operations.  What this means is that if some thread
  is hammering a particular list with requests, it is possible that another
  thread is blocked for an extended time.  The lock-free guarantee still
  applies since the hammering thread is making progress.

- without an RCU mechanism in place, the point of contention for atomic
  lists is memory deallocation.  As it is, **a rwlock is required for
  correct operation**.  The *read* lock must be held for all accesses,
  including reading the list, adding items to the list, and removing items
  from the list.  The *write* lock must be acquired and released before
  deallocating any list element.  If this is not followed, a use-after-free
  can occur as a MT race condition when an element gets deallocated while
  another thread is accessing the list.

  .. note::

     The *write* lock does not need to be held for deleting items from the
     list, and there should not be any instructions between the
     ``pthread_rwlock_wrlock`` and ``pthread_rwlock_unlock``.  The write
     lock is used as a sequence point, not as an exclusion mechanism.

- insertion operations are always safe to do with the read lock held.
  Added items are immediately visible after the insertion call returns and
  should not be touched anymore.

- when removing a *particular* (pre-determined) item, the caller must
  ensure that no other thread is attempting to remove that same item.  If
  this cannot be guaranteed by architecture, a separate lock might need to
  be added.

- concurrent `pop` calls are always safe to do with only the read lock
  held.  This does not fall under the previous rule since the `pop` call
  will select the next item if the first is already being removed by
  another thread.

  **Deallocation locking still applies.**  Assume another thread starts
  reading the list, but gets task-switched by the kernel while reading the
  first item.  `pop` will happily remove and return that item.  If it is
  deallocated without acquiring and releasing the write lock, the other
  thread will later resume execution and try to access the now-deleted
  element.

- the list count should be considered an estimate.  Since there might be
  concurrent insertions or removals in progress, it might already be
  outdated by the time the call returns.  No attempt is made to have it be
  correct even for a nanosecond.

Overall, atomic lists are well-suited for MT queues; concurrent insertion,
iteration and removal operations will work with the read lock held.

Code snippets
^^^^^^^^^^^^^

Iteration:

.. code-block:: c

   struct item *i;

   pthread_rwlock_rdlock(&itemhead_rwlock);
   for_each (itemlist, &itemhead, i) {
       /* lock must remain held while iterating */
       ...
   }
   pthread_rwlock_unlock(&itemhead_rwlock);

Head removal (pop) and deallocation:

.. code-block:: c

   struct item *i;

   pthread_rwlock_rdlock(&itemhead_rwlock);
   i = itemlist_pop(&itemhead);
   pthread_rwlock_unlock(&itemhead_rwlock);

   /* i might still be visible for another thread doing an
    * for_each() (but won't be returned by another pop()) */
   ...

   pthread_rwlock_wrlock(&itemhead_rwlock);
   pthread_rwlock_unlock(&itemhead_rwlock);
   /* i now guaranteed to be gone from the list.
    * note nothing between wrlock() and unlock() */
   XFREE(MTYPE_ITEM, i);
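
The producer side of such a queue is symmetric (a sketch following the same
locking rules; per the guarantees above, the read lock suffices for
insertion):

.. code-block:: c

   struct item *i = XCALLOC(MTYPE_ITEM, sizeof(*i));

   /* fill in *i before publishing it; other threads may see it as
    * soon as the add call returns */
   i->value = 123;

   pthread_rwlock_rdlock(&itemhead_rwlock);
   itemlist_add_tail(&itemhead, i);
   pthread_rwlock_unlock(&itemhead_rwlock);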

FRR lists
---------

.. TODO::

   document

BSD lists
---------

.. TODO::

   refer to external docs
@@ -30,6 +30,7 @@ dev_RSTFILES = \
 	doc/developer/include-compile.rst \
 	doc/developer/index.rst \
 	doc/developer/library.rst \
+	doc/developer/lists.rst \
 	doc/developer/logging.rst \
 	doc/developer/maintainer-release-build.rst \
 	doc/developer/memtypes.rst \
isisd/dict.c (1510 lines deleted; file diff suppressed because it is too large)
isisd/dict.h

@@ -1,121 +0,0 @@
-/*
- * Dictionary Abstract Data Type
- * Copyright (C) 1997 Kaz Kylheku <kaz@ashi.footprints.net>
- *
- * Free Software License:
- *
- * All rights are reserved by the author, with the following exceptions:
- * Permission is granted to freely reproduce and distribute this software,
- * possibly in exchange for a fee, provided that this copyright notice appears
- * intact. Permission is also granted to adapt this software to produce
- * derivative works, as long as the modified versions carry this copyright
- * notice and additional notices stating that the work has been modified.
- * This source code may be translated into executable form and incorporated
- * into proprietary software; there is no requirement for such software to
- * contain a copyright notice related to this source.
- *
- */
-
-#ifndef DICT_H
-#define DICT_H
-
-#include <limits.h>
-
-/*
- * Blurb for inclusion into C++ translation units
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef unsigned long dictcount_t;
-#define DICTCOUNT_T_MAX ULONG_MAX
-
-/*
- * The dictionary is implemented as a red-black tree
- */
-
-typedef enum { dnode_red, dnode_black } dnode_color_t;
-
-typedef struct dnode_t {
-	struct dnode_t *dict_left;
-	struct dnode_t *dict_right;
-	struct dnode_t *dict_parent;
-	dnode_color_t dict_color;
-	const void *dict_key;
-	void *dict_data;
-} dnode_t;
-
-typedef int (*dict_comp_t)(const void *, const void *);
-typedef dnode_t *(*dnode_alloc_t)(void *);
-typedef void (*dnode_free_t)(dnode_t *, void *);
-
-typedef struct dict_t {
-	dnode_t dict_nilnode;
-	dictcount_t dict_nodecount;
-	dictcount_t dict_maxcount;
-	dict_comp_t dict_compare;
-	dnode_alloc_t dict_allocnode;
-	dnode_free_t dict_freenode;
-	void *dict_context;
-	int dict_dupes;
-} dict_t;
-
-typedef void (*dnode_process_t)(dict_t *, dnode_t *, void *);
-
-typedef struct dict_load_t {
-	dict_t *dict_dictptr;
-	dnode_t dict_nilnode;
-} dict_load_t;
-
-extern dict_t *dict_create(dictcount_t, dict_comp_t);
-extern void dict_set_allocator(dict_t *, dnode_alloc_t, dnode_free_t, void *);
-extern void dict_destroy(dict_t *);
-extern void dict_free_nodes(dict_t *);
-extern void dict_free(dict_t *);
-extern dict_t *dict_init(dict_t *, dictcount_t, dict_comp_t);
-extern void dict_init_like(dict_t *, const dict_t *);
-extern int dict_verify(dict_t *);
-extern int dict_similar(const dict_t *, const dict_t *);
-extern dnode_t *dict_lookup(dict_t *, const void *);
-extern dnode_t *dict_lower_bound(dict_t *, const void *);
-extern dnode_t *dict_upper_bound(dict_t *, const void *);
-extern void dict_insert(dict_t *, dnode_t *, const void *);
-extern dnode_t *dict_delete(dict_t *, dnode_t *);
-extern int dict_alloc_insert(dict_t *, const void *, void *);
-extern void dict_delete_free(dict_t *, dnode_t *);
-extern dnode_t *dict_first(dict_t *);
-extern dnode_t *dict_last(dict_t *);
-extern dnode_t *dict_next(dict_t *, dnode_t *);
-extern dnode_t *dict_prev(dict_t *, dnode_t *);
-extern dictcount_t dict_count(dict_t *);
-extern int dict_isempty(dict_t *);
-extern int dict_isfull(dict_t *);
-extern int dict_contains(dict_t *, dnode_t *);
-extern void dict_allow_dupes(dict_t *);
-extern int dnode_is_in_a_dict(dnode_t *);
-extern dnode_t *dnode_create(void *);
-extern dnode_t *dnode_init(dnode_t *, void *);
-extern void dnode_destroy(dnode_t *);
-extern void *dnode_get(dnode_t *);
-extern const void *dnode_getkey(dnode_t *);
-extern void dnode_put(dnode_t *, void *);
-extern void dict_process(dict_t *, void *, dnode_process_t);
-extern void dict_load_begin(dict_load_t *, dict_t *);
-extern void dict_load_next(dict_load_t *, dnode_t *, const void *);
-extern void dict_load_end(dict_load_t *);
-extern void dict_merge(dict_t *, dict_t *);
-
-#define dict_isfull(D) ((D)->dict_nodecount == (D)->dict_maxcount)
-#define dict_count(D) ((D)->dict_nodecount)
-#define dict_isempty(D) ((D)->dict_nodecount == 0)
-#define dnode_get(N) ((N)->dict_data)
-#define dnode_getkey(N) ((N)->dict_key)
-#define dnode_put(N, X) ((N)->dict_data = (X))
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
@@ -32,7 +32,6 @@
 #include "if.h"
 #include "stream.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_flags.h"
@@ -34,7 +34,6 @@
 #include "if.h"
 #include "lib_errors.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_circuit.h"
@@ -40,7 +40,6 @@
 #include "qobj.h"
 #include "lib/northbound_cli.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_flags.h"
@@ -540,7 +539,6 @@ static void isis_circuit_update_all_srmflags(struct isis_circuit *circuit,
 {
 	struct isis_area *area;
 	struct isis_lsp *lsp;
-	dnode_t *dnode;
 	int level;
 
 	assert(circuit);
@@ -550,14 +548,10 @@ static void isis_circuit_update_all_srmflags(struct isis_circuit *circuit,
 		if (!(level & circuit->is_type))
 			continue;
 
-		if (!area->lspdb[level - 1]
-		    || !dict_count(area->lspdb[level - 1]))
+		if (!lspdb_count(&area->lspdb[level - 1]))
 			continue;
 
-		for (dnode = dict_first(area->lspdb[level - 1]);
-		     dnode != NULL;
-		     dnode = dict_next(area->lspdb[level - 1], dnode)) {
-			lsp = dnode_get(dnode);
+		for_each (lspdb, &area->lspdb[level - 1], lsp) {
 			if (is_set) {
 				isis_tx_queue_add(circuit->tx_queue, lsp,
 						  TX_LSP_NORMAL);
@@ -32,7 +32,6 @@
 #include "prefix.h"
 #include "stream.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_flags.h"
@@ -38,7 +38,6 @@
 #include "if.h"
 #include "lib_errors.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_circuit.h"
@@ -32,7 +32,6 @@
 #include "stream.h"
 #include "if.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_misc.h"
@@ -31,7 +31,6 @@
 #include "if.h"
 #include "thread.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_flags.h"
@@ -32,7 +32,6 @@
 #include "stream.h"
 #include "table.h"
 
-#include "isisd/dict.h"
 #include "isisd/isis_constants.h"
 #include "isisd/isis_common.h"
 #include "isisd/isis_flags.h"
295
isisd/isis_lsp.c
295
isisd/isis_lsp.c
@ -40,7 +40,6 @@
|
||||
#include "srcdest_table.h"
|
||||
#include "lib_errors.h"
|
||||
|
||||
#include "isisd/dict.h"
|
||||
#include "isisd/isis_constants.h"
|
||||
#include "isisd/isis_common.h"
|
||||
#include "isisd/isis_flags.h"
|
||||
@ -63,41 +62,38 @@ static int lsp_refresh(struct thread *thread);
|
||||
static int lsp_l1_refresh_pseudo(struct thread *thread);
|
||||
static int lsp_l2_refresh_pseudo(struct thread *thread);
|
||||
|
||||
static void lsp_destroy(struct isis_lsp *lsp);
|
||||
|
||||
int lsp_id_cmp(uint8_t *id1, uint8_t *id2)
|
||||
{
|
||||
return memcmp(id1, id2, ISIS_SYS_ID_LEN + 2);
|
||||
}
|
||||
|
||||
dict_t *lsp_db_init(void)
|
||||
int lspdb_compare(const struct isis_lsp *a, const struct isis_lsp *b)
|
||||
{
|
||||
dict_t *dict;
|
||||
|
||||
dict = dict_create(DICTCOUNT_T_MAX, (dict_comp_t)lsp_id_cmp);
|
||||
|
||||
return dict;
|
||||
return memcmp(a->hdr.lsp_id, b->hdr.lsp_id, sizeof(a->hdr.lsp_id));
|
||||
}
|
||||
|
||||
struct isis_lsp *lsp_search(uint8_t *id, dict_t *lspdb)
|
||||
void lsp_db_init(struct lspdb_head *head)
|
||||
{
|
||||
dnode_t *node;
|
||||
lspdb_init(head);
|
||||
}
|
||||
|
||||
#ifdef EXTREME_DEBUG
|
||||
dnode_t *dn;
|
||||
void lsp_db_fini(struct lspdb_head *head)
|
||||
{
|
||||
struct isis_lsp *lsp;
|
||||
|
||||
zlog_debug("searching db");
|
||||
for (dn = dict_first(lspdb); dn; dn = dict_next(lspdb, dn)) {
|
||||
zlog_debug("%s\t%pX",
|
||||
rawlspid_print((uint8_t *)dnode_getkey(dn)),
|
||||
dnode_get(dn));
|
||||
}
|
||||
#endif /* EXTREME DEBUG */
|
||||
while ((lsp = lspdb_pop(head)))
|
||||
lsp_destroy(lsp);
|
||||
lspdb_fini(head);
|
||||
}
|
||||
|
||||
node = dict_lookup(lspdb, id);
|
||||
struct isis_lsp *lsp_search(struct lspdb_head *head, const uint8_t *id)
|
||||
{
|
||||
struct isis_lsp searchfor;
|
||||
memcpy(searchfor.hdr.lsp_id, id, sizeof(searchfor.hdr.lsp_id));
|
||||
|
||||
if (node)
|
||||
return (struct isis_lsp *)dnode_get(node);
|
||||
|
||||
return NULL;
|
||||
return lspdb_find(head, &searchfor);
|
||||
}
|
||||
|
||||
static void lsp_clear_data(struct isis_lsp *lsp)
|
||||
@ -109,7 +105,7 @@ static void lsp_clear_data(struct isis_lsp *lsp)
|
||||
lsp->tlvs = NULL;
|
||||
}
|
||||
|
||||
static void lsp_remove_frags(struct list *frags, dict_t *lspdb);
|
||||
static void lsp_remove_frags(struct lspdb_head *head, struct list *frags);
|
||||
|
||||
static void lsp_destroy(struct isis_lsp *lsp)
|
||||
{
|
||||
@ -128,8 +124,8 @@ static void lsp_destroy(struct isis_lsp *lsp)
|
||||
|
||||
if (!LSP_FRAGMENT(lsp->hdr.lsp_id)) {
|
||||
if (lsp->lspu.frags) {
|
||||
lsp_remove_frags(lsp->lspu.frags,
|
||||
lsp->area->lspdb[lsp->level - 1]);
|
||||
lsp_remove_frags(&lsp->area->lspdb[lsp->level - 1],
|
||||
lsp->lspu.frags);
|
||||
list_delete(&lsp->lspu.frags);
|
||||
}
|
||||
} else {
|
||||
@ -148,56 +144,34 @@ static void lsp_destroy(struct isis_lsp *lsp)
|
||||
XFREE(MTYPE_ISIS_LSP, lsp);
|
||||
}
|
||||
|
||||
void lsp_db_destroy(dict_t *lspdb)
|
||||
{
|
||||
dnode_t *dnode, *next;
|
||||
struct isis_lsp *lsp;
|
||||
|
||||
dnode = dict_first(lspdb);
|
||||
while (dnode) {
|
||||
next = dict_next(lspdb, dnode);
|
||||
lsp = dnode_get(dnode);
|
||||
lsp_destroy(lsp);
|
||||
dict_delete_free(lspdb, dnode);
|
||||
dnode = next;
|
||||
}
|
||||
|
||||
dict_free(lspdb);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove all the frags belonging to the given lsp
|
||||
*/
|
||||
static void lsp_remove_frags(struct list *frags, dict_t *lspdb)
|
||||
static void lsp_remove_frags(struct lspdb_head *head, struct list *frags)
|
||||
{
|
||||
dnode_t *dnode;
|
||||
struct listnode *lnode, *lnnode;
|
||||
struct isis_lsp *lsp;
|
||||
|
||||
for (ALL_LIST_ELEMENTS(frags, lnode, lnnode, lsp)) {
|
||||
dnode = dict_lookup(lspdb, lsp->hdr.lsp_id);
|
||||
lsp = lsp_search(head, lsp->hdr.lsp_id);
|
||||
lspdb_del(head, lsp);
|
||||
lsp_destroy(lsp);
|
||||
dnode_destroy(dict_delete(lspdb, dnode));
|
||||
}
|
||||
}
|
||||
|
||||
void lsp_search_and_destroy(uint8_t *id, dict_t *lspdb)
|
||||
void lsp_search_and_destroy(struct lspdb_head *head, const uint8_t *id)
|
||||
{
|
||||
dnode_t *node;
|
||||
struct isis_lsp *lsp;
|
||||
|
||||
node = dict_lookup(lspdb, id);
|
||||
if (node) {
|
||||
node = dict_delete(lspdb, node);
|
||||
lsp = dnode_get(node);
|
||||
lsp = lsp_search(head, id);
|
||||
if (lsp) {
|
||||
lspdb_del(head, lsp);
|
||||
/*
|
||||
* If this is a zero lsp, remove all the frags now
|
||||
*/
|
||||
if (LSP_FRAGMENT(lsp->hdr.lsp_id) == 0) {
|
||||
if (lsp->lspu.frags)
|
||||
lsp_remove_frags(lsp->lspu.frags, lspdb);
|
||||
lsp_remove_frags(head, lsp->lspu.frags);
|
||||
} else {
|
||||
/*
|
||||
* else just remove this frag, from the zero lsps' frag
|
||||
@ -209,7 +183,6 @@ void lsp_search_and_destroy(uint8_t *id, dict_t *lspdb)
|
||||
lsp);
|
||||
}
|
||||
lsp_destroy(lsp);
|
||||
dnode_destroy(node);
|
||||
}
|
||||
}
|
||||
|
||||
@ -514,7 +487,7 @@ void lsp_update(struct isis_lsp *lsp, struct isis_lsp_hdr *hdr,
|
||||
|
||||
memcpy(lspid, lsp->hdr.lsp_id, ISIS_SYS_ID_LEN + 1);
|
||||
LSP_FRAGMENT(lspid) = 0;
|
||||
lsp0 = lsp_search(lspid, area->lspdb[level - 1]);
|
||||
lsp0 = lsp_search(&area->lspdb[level - 1], lspid);
|
||||
if (lsp0)
|
||||
lsp_link_fragment(lsp, lsp0);
|
||||
}
|
||||
@ -582,9 +555,9 @@ struct isis_lsp *lsp_new(struct isis_area *area, uint8_t *lsp_id,
|
||||
return lsp;
|
||||
}
|
||||
|
||||
void lsp_insert(struct isis_lsp *lsp, dict_t *lspdb)
|
||||
void lsp_insert(struct lspdb_head *head, struct isis_lsp *lsp)
|
||||
{
|
||||
dict_alloc_insert(lspdb, lsp->hdr.lsp_id, lsp);
|
||||
lspdb_add(head, lsp);
|
||||
if (lsp->hdr.seqno)
|
||||
isis_spf_schedule(lsp->area, lsp->level);
|
||||
}
|
||||
@ -592,13 +565,16 @@ void lsp_insert(struct isis_lsp *lsp, dict_t *lspdb)
|
||||
/*
|
||||
* Build a list of LSPs with non-zero ht bounded by start and stop ids
|
||||
*/
|
||||
void lsp_build_list_nonzero_ht(uint8_t *start_id, uint8_t *stop_id,
|
||||
struct list *list, dict_t *lspdb)
|
||||
void lsp_build_list_nonzero_ht(struct lspdb_head *head, const uint8_t *start_id,
|
||||
const uint8_t *stop_id, struct list *list)
|
||||
{
|
||||
for (dnode_t *curr = dict_lower_bound(lspdb, start_id);
|
||||
curr; curr = dict_next(lspdb, curr)) {
|
||||
struct isis_lsp *lsp = curr->dict_data;
|
||||
struct isis_lsp searchfor;
|
||||
struct isis_lsp *lsp, *start;
|
||||
|
||||
memcpy(&searchfor.hdr.lsp_id, start_id, sizeof(searchfor.hdr.lsp_id));
|
||||
|
||||
start = lspdb_find_gteq(head, &searchfor);
|
||||
for_each_from (lspdb, head, lsp, start) {
|
||||
if (memcmp(lsp->hdr.lsp_id, stop_id,
|
||||
ISIS_SYS_ID_LEN + 2) > 0)
|
||||
break;
|
||||
@ -699,26 +675,20 @@ void lsp_print_detail(struct isis_lsp *lsp, struct vty *vty, char dynhost)
}

/* print all the lsps info in the local lspdb */
int lsp_print_all(struct vty *vty, dict_t *lspdb, char detail, char dynhost)
int lsp_print_all(struct vty *vty, struct lspdb_head *head, char detail,
char dynhost)
{

dnode_t *node = dict_first(lspdb), *next;
struct isis_lsp *lsp;
int lsp_count = 0;

if (detail == ISIS_UI_LEVEL_BRIEF) {
while (node != NULL) {
/* I think it is unnecessary, so I comment it out */
/* dict_contains (lspdb, node); */
next = dict_next(lspdb, node);
lsp_print(dnode_get(node), vty, dynhost);
node = next;
for_each (lspdb, head, lsp) {
lsp_print(lsp, vty, dynhost);
lsp_count++;
}
} else if (detail == ISIS_UI_LEVEL_DETAIL) {
while (node != NULL) {
next = dict_next(lspdb, node);
lsp_print_detail(dnode_get(node), vty, dynhost);
node = next;
for_each (lspdb, head, lsp) {
lsp_print_detail(lsp, vty, dynhost);
lsp_count++;
}
}
@ -847,7 +817,7 @@ static struct isis_lsp *lsp_next_frag(uint8_t frag_num, struct isis_lsp *lsp0,
memcpy(frag_id, lsp0->hdr.lsp_id, ISIS_SYS_ID_LEN + 1);
LSP_FRAGMENT(frag_id) = frag_num;

lsp = lsp_search(frag_id, area->lspdb[level - 1]);
lsp = lsp_search(&area->lspdb[level - 1], frag_id);
if (lsp) {
lsp_clear_data(lsp);
if (!lsp->lspu.zero_lsp)
@ -860,7 +830,7 @@ static struct isis_lsp *lsp_next_frag(uint8_t frag_num, struct isis_lsp *lsp0,
area->attached_bit),
0, lsp0, level);
lsp->own_lsp = 1;
lsp_insert(lsp, area->lspdb[level - 1]);
lsp_insert(&area->lspdb[level - 1], lsp);
return lsp;
}

@ -1228,12 +1198,12 @@ int lsp_generate(struct isis_area *area, int level)
memcpy(&lspid, isis->sysid, ISIS_SYS_ID_LEN);

/* only builds the lsp if the area shares the level */
oldlsp = lsp_search(lspid, area->lspdb[level - 1]);
oldlsp = lsp_search(&area->lspdb[level - 1], lspid);
if (oldlsp) {
/* FIXME: we should actually initiate a purge */
seq_num = oldlsp->hdr.seqno;
lsp_search_and_destroy(oldlsp->hdr.lsp_id,
area->lspdb[level - 1]);
lsp_search_and_destroy(&area->lspdb[level - 1],
oldlsp->hdr.lsp_id);
}
rem_lifetime = lsp_rem_lifetime(area, level);
newlsp =
@ -1243,7 +1213,7 @@ int lsp_generate(struct isis_area *area, int level)
newlsp->area = area;
newlsp->own_lsp = 1;

lsp_insert(newlsp, area->lspdb[level - 1]);
lsp_insert(&area->lspdb[level - 1], newlsp);
/* build_lsp_data (newlsp, area); */
lsp_build(newlsp, area);
/* time to calculate our checksum */
@ -1288,7 +1258,7 @@ int lsp_generate(struct isis_area *area, int level)
*/
static int lsp_regenerate(struct isis_area *area, int level)
{
dict_t *lspdb;
struct lspdb_head *head;
struct isis_lsp *lsp, *frag;
struct listnode *node;
uint8_t lspid[ISIS_SYS_ID_LEN + 2];
@ -1297,12 +1267,12 @@ static int lsp_regenerate(struct isis_area *area, int level)
if ((area == NULL) || (area->is_type & level) != level)
return ISIS_ERROR;

lspdb = area->lspdb[level - 1];
head = &area->lspdb[level - 1];

memset(lspid, 0, ISIS_SYS_ID_LEN + 2);
memcpy(lspid, isis->sysid, ISIS_SYS_ID_LEN);

lsp = lsp_search(lspid, lspdb);
lsp = lsp_search(head, lspid);

if (!lsp) {
flog_err(EC_LIB_DEVELOPMENT,
@ -1445,7 +1415,7 @@ int _lsp_regenerate_schedule(struct isis_area *area, int level,
continue;
}

lsp = lsp_search(id, area->lspdb[lvl - 1]);
lsp = lsp_search(&area->lspdb[lvl - 1], id);
if (!lsp) {
sched_debug(
"ISIS (%s): We do not have any LSPs to regenerate, nothing todo.",
@ -1597,7 +1567,7 @@ static void lsp_build_pseudo(struct isis_lsp *lsp, struct isis_circuit *circuit,

int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
{
dict_t *lspdb = circuit->area->lspdb[level - 1];
struct lspdb_head *head = &circuit->area->lspdb[level - 1];
struct isis_lsp *lsp;
uint8_t lsp_id[ISIS_SYS_ID_LEN + 2];
uint16_t rem_lifetime, refresh_time;
@ -1615,7 +1585,7 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
/*
* If for some reason have a pseudo LSP in the db already -> regenerate
*/
if (lsp_search(lsp_id, lspdb))
if (lsp_search(head, lsp_id))
return lsp_regenerate_schedule_pseudo(circuit, level);

rem_lifetime = lsp_rem_lifetime(circuit->area, level);
@ -1628,7 +1598,7 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
lsp_build_pseudo(lsp, circuit, level);
lsp_pack_pdu(lsp);
lsp->own_lsp = 1;
lsp_insert(lsp, lspdb);
lsp_insert(head, lsp);
lsp_flood(lsp, NULL);

refresh_time = lsp_refresh_time(lsp, rem_lifetime);
@ -1659,7 +1629,7 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)

static int lsp_regenerate_pseudo(struct isis_circuit *circuit, int level)
{
dict_t *lspdb = circuit->area->lspdb[level - 1];
struct lspdb_head *head = &circuit->area->lspdb[level - 1];
struct isis_lsp *lsp;
uint8_t lsp_id[ISIS_SYS_ID_LEN + 2];
uint16_t rem_lifetime, refresh_time;
@ -1674,7 +1644,7 @@ static int lsp_regenerate_pseudo(struct isis_circuit *circuit, int level)
LSP_PSEUDO_ID(lsp_id) = circuit->circuit_id;
LSP_FRAGMENT(lsp_id) = 0;

lsp = lsp_search(lsp_id, lspdb);
lsp = lsp_search(head, lsp_id);

if (!lsp) {
flog_err(EC_LIB_DEVELOPMENT,
@ -1813,7 +1783,7 @@ int lsp_regenerate_schedule_pseudo(struct isis_circuit *circuit, int level)
continue;
}

lsp = lsp_search(lsp_id, circuit->area->lspdb[lvl - 1]);
lsp = lsp_search(&circuit->area->lspdb[lvl - 1], lsp_id);
if (!lsp) {
sched_debug(
"ISIS (%s): Pseudonode LSP does not exist yet, nothing to regenerate.",
@ -1869,7 +1839,6 @@ int lsp_tick(struct thread *thread)
{
struct isis_area *area;
struct isis_lsp *lsp;
dnode_t *dnode, *dnode_next;
int level;
uint16_t rem_lifetime;
bool fabricd_sync_incomplete = false;
@ -1885,83 +1854,69 @@ int lsp_tick(struct thread *thread)
* Remove LSPs which have aged out
*/
for (level = 0; level < ISIS_LEVELS; level++) {
if (area->lspdb[level] && dict_count(area->lspdb[level]) > 0) {
for (dnode = dict_first(area->lspdb[level]);
dnode != NULL; dnode = dnode_next) {
dnode_next =
dict_next(area->lspdb[level], dnode);
lsp = dnode_get(dnode);
struct isis_lsp *next = lspdb_first(&area->lspdb[level]);
for_each_from (lspdb, &area->lspdb[level], lsp, next) {
/*
* The lsp rem_lifetime is kept at 0 for MaxAge
* or
* ZeroAgeLifetime depending on explicit purge
* or
* natural age out. So schedule spf only once
* when
* the first time rem_lifetime becomes 0.
*/
rem_lifetime = lsp->hdr.rem_lifetime;
lsp_set_time(lsp);

/*
* The lsp rem_lifetime is kept at 0 for MaxAge
* or
* ZeroAgeLifetime depending on explicit purge
* or
* natural age out. So schedule spf only once
* when
* the first time rem_lifetime becomes 0.
/*
* Schedule may run spf which should be done
* only after
* the lsp rem_lifetime becomes 0 for the first
* time.
* ISO 10589 - 7.3.16.4 first paragraph.
*/
if (rem_lifetime == 1 && lsp->hdr.seqno != 0) {
/* 7.3.16.4 a) set SRM flags on all */
/* 7.3.16.4 b) retain only the header */
if (lsp->area->purge_originator)
lsp_purge(lsp, lsp->level, NULL);
else
lsp_flood(lsp, NULL);
/* 7.3.16.4 c) record the time to purge
* FIXME */
isis_spf_schedule(lsp->area, lsp->level);
}

if (lsp->age_out == 0) {
zlog_debug(
"ISIS-Upd (%s): L%u LSP %s seq "
"0x%08" PRIx32 " aged out",
area->area_tag, lsp->level,
rawlspid_print(lsp->hdr.lsp_id),
lsp->hdr.seqno);

/* if we're aging out fragment 0, lsp_destroy()
* below will delete all other fragments too,
* so we need to skip over those
*/
rem_lifetime = lsp->hdr.rem_lifetime;
lsp_set_time(lsp);
if (!LSP_FRAGMENT(lsp->hdr.lsp_id))
while (next &&
!memcmp(next->hdr.lsp_id,
lsp->hdr.lsp_id,
ISIS_SYS_ID_LEN + 1))
next = lspdb_next(
&area->lspdb[level],
next);

/*
* Schedule may run spf which should be done
* only after
* the lsp rem_lifetime becomes 0 for the first
* time.
* ISO 10589 - 7.3.16.4 first paragraph.
*/
if (rem_lifetime == 1 && lsp->hdr.seqno != 0) {
/* 7.3.16.4 a) set SRM flags on all */
/* 7.3.16.4 b) retain only the header */
if (lsp->area->purge_originator)
lsp_purge(lsp, lsp->level, NULL);
else
lsp_flood(lsp, NULL);
/* 7.3.16.4 c) record the time to purge
* FIXME */
isis_spf_schedule(lsp->area, lsp->level);
}
lspdb_del(&area->lspdb[level], lsp);
lsp_destroy(lsp);
lsp = NULL;
}

if (lsp->age_out == 0) {
zlog_debug(
"ISIS-Upd (%s): L%u LSP %s seq "
"0x%08" PRIx32 " aged out",
area->area_tag, lsp->level,
rawlspid_print(lsp->hdr.lsp_id),
lsp->hdr.seqno);

/* if we're aging out fragment 0,
* lsp_destroy() below will delete all
* other fragments too, so we need to
* skip over those
*/
while (!LSP_FRAGMENT(lsp->hdr.lsp_id)
&& dnode_next) {
struct isis_lsp *nextlsp;

nextlsp = dnode_get(dnode_next);
if (memcmp(nextlsp->hdr.lsp_id,
lsp->hdr.lsp_id,
ISIS_SYS_ID_LEN + 1))
break;

dnode_next = dict_next(
area->lspdb[level],
dnode_next);
}

lsp_destroy(lsp);
lsp = NULL;
dict_delete_free(area->lspdb[level],
dnode);
}

if (fabricd_init_c && lsp) {
fabricd_sync_incomplete |=
ISIS_CHECK_FLAG(lsp->SSNflags,
fabricd_init_c);
}
if (fabricd_init_c && lsp) {
fabricd_sync_incomplete |=
ISIS_CHECK_FLAG(lsp->SSNflags,
fabricd_init_c);
}
}
}
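Two details of the rewritten ageing loop deserve a note. for_each_from() advances its cursor before the loop body runs, so the saved `next` pointer stays valid even when the current `lsp` is deleted; and because destroying fragment 0 frees all sibling fragments, `next` is manually pushed past every entry sharing the same system ID first. A reduced sketch of this delete-while-iterating idiom, reusing the illustrative itemtree container from the sketch above (expired() and shares_fate() are placeholders):

	struct item *i, *next = itemtree_first(head);

	for_each_from (itemtree, head, i, next) {
		/* 'next' already points one past 'i' when the body runs */
		if (!expired(i))
			continue;

		/* skip entries that deleting 'i' will also free */
		while (next && shares_fate(next, i))
			next = itemtree_next(head, next);

		itemtree_del(head, i);
		item_free(i); /* safe: iteration resumes at 'next' */
	}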
@ -1979,7 +1934,7 @@ void lsp_purge_pseudo(uint8_t *id, struct isis_circuit *circuit, int level)
{
struct isis_lsp *lsp;

lsp = lsp_search(id, circuit->area->lspdb[level - 1]);
lsp = lsp_search(&circuit->area->lspdb[level - 1], id);
if (!lsp)
return;

@ -2012,7 +1967,7 @@ void lsp_purge_non_exist(int level, struct isis_lsp_hdr *hdr,

lsp_pack_pdu(lsp);

lsp_insert(lsp, area->lspdb[lsp->level - 1]);
lsp_insert(&area->lspdb[lsp->level - 1], lsp);
lsp_flood(lsp, NULL);

return;

@ -24,13 +24,18 @@
#ifndef _ZEBRA_ISIS_LSP_H
#define _ZEBRA_ISIS_LSP_H

#include "lib/typesafe.h"
#include "isisd/isis_pdu.h"

PREDECL_RBTREE_UNIQ(lspdb)

/* Structure for isis_lsp, this structure will only support the fixed
* System ID (Currently 6) (at least for now). In order to support more
* We will have to split the header into two parts, and for readability
* sake it should better be avoided */
struct isis_lsp {
struct lspdb_item dbe;

struct isis_lsp_hdr hdr;
struct stream *pdu; /* full pdu lsp */
union {
@ -54,8 +59,11 @@ struct isis_lsp {
bool flooding_circuit_scoped;
};

dict_t *lsp_db_init(void);
void lsp_db_destroy(dict_t *lspdb);
extern int lspdb_compare(const struct isis_lsp *a, const struct isis_lsp *b);
DECLARE_RBTREE_UNIQ(lspdb, struct isis_lsp, dbe, lspdb_compare)

void lsp_db_init(struct lspdb_head *head);
void lsp_db_fini(struct lspdb_head *head);
int lsp_tick(struct thread *thread);

int lsp_generate(struct isis_area *area, int level);
@ -76,14 +84,16 @@ struct isis_lsp *lsp_new_from_recv(struct isis_lsp_hdr *hdr,
struct isis_tlvs *tlvs,
struct stream *stream, struct isis_lsp *lsp0,
struct isis_area *area, int level);
void lsp_insert(struct isis_lsp *lsp, dict_t *lspdb);
struct isis_lsp *lsp_search(uint8_t *id, dict_t *lspdb);
void lsp_insert(struct lspdb_head *head, struct isis_lsp *lsp);
struct isis_lsp *lsp_search(struct lspdb_head *head, const uint8_t *id);

void lsp_build_list(uint8_t *start_id, uint8_t *stop_id, uint8_t num_lsps,
struct list *list, dict_t *lspdb);
void lsp_build_list_nonzero_ht(uint8_t *start_id, uint8_t *stop_id,
struct list *list, dict_t *lspdb);
void lsp_search_and_destroy(uint8_t *id, dict_t *lspdb);
void lsp_build_list(struct lspdb_head *head, const uint8_t *start_id,
const uint8_t *stop_id, uint8_t num_lsps,
struct list *list);
void lsp_build_list_nonzero_ht(struct lspdb_head *head,
const uint8_t *start_id,
const uint8_t *stop_id, struct list *list);
void lsp_search_and_destroy(struct lspdb_head *head, const uint8_t *id);
void lsp_purge_pseudo(uint8_t *id, struct isis_circuit *circuit, int level);
void lsp_purge_non_exist(int level, struct isis_lsp_hdr *hdr,
struct isis_area *area);
@ -108,7 +118,8 @@ void lsp_inc_seqno(struct isis_lsp *lsp, uint32_t seqno);
void lspid_print(uint8_t *lsp_id, char *dest, char dynhost, char frag);
void lsp_print(struct isis_lsp *lsp, struct vty *vty, char dynhost);
void lsp_print_detail(struct isis_lsp *lsp, struct vty *vty, char dynhost);
int lsp_print_all(struct vty *vty, dict_t *lspdb, char detail, char dynhost);
int lsp_print_all(struct vty *vty, struct lspdb_head *head, char detail,
char dynhost);
/* sets SRMflags for all active circuits of an lsp */
void lsp_set_all_srmflags(struct isis_lsp *lsp, bool set);
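The header shows the two-step declaration the typesafe containers use: PREDECL_RBTREE_UNIQ() before the struct definition introduces the opaque head/item types so `struct lspdb_item dbe` can be embedded, and DECLARE_RBTREE_UNIQ() after it generates the inline accessors once the struct is complete. Only the comparator's prototype appears here; its body lives in the .c file and, for a fixed-size LSP ID key, plausibly reduces to a memcmp (a sketch of the shape such a comparator takes, not necessarily the literal code of the patch):

	int lspdb_compare(const struct isis_lsp *a, const struct isis_lsp *b)
	{
		/* LSP ID = system ID + pseudonode ID + fragment number */
		return memcmp(a->hdr.lsp_id, b->hdr.lsp_id,
			      ISIS_SYS_ID_LEN + 2);
	}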
@ -41,7 +41,6 @@
#include "qobj.h"
#include "libfrr.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"

@ -29,7 +29,6 @@
#include "if.h"
#include "command.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"

@ -25,7 +25,6 @@
#include "libfrr.h"
#include "linklist.h"
#include "log.h"
#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"

@ -36,7 +36,6 @@
#include "md5.h"
#include "lib_errors.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"
@ -960,7 +959,7 @@ static int process_lsp(uint8_t pdu_type, struct isis_circuit *circuit,
/* Find the LSP in our database and compare it to this Link State header
*/
struct isis_lsp *lsp =
lsp_search(hdr.lsp_id, circuit->area->lspdb[level - 1]);
lsp_search(&circuit->area->lspdb[level - 1], hdr.lsp_id);
int comp = 0;
if (lsp)
comp = lsp_compare(circuit->area->area_tag, lsp, hdr.seqno,
@ -1187,7 +1186,7 @@ dontcheckadj:
memcpy(lspid, hdr.lsp_id, ISIS_SYS_ID_LEN + 1);
LSP_FRAGMENT(lspid) = 0;
lsp0 = lsp_search(
lspid, circuit->area->lspdb[level - 1]);
&circuit->area->lspdb[level - 1], lspid);
if (!lsp0) {
zlog_debug(
"Got lsp frag, while zero lsp not in database");
@ -1200,8 +1199,8 @@ dontcheckadj:
&hdr, tlvs, circuit->rcv_stream, lsp0,
circuit->area, level);
tlvs = NULL;
lsp_insert(lsp,
circuit->area->lspdb[level - 1]);
lsp_insert(&circuit->area->lspdb[level - 1],
lsp);
} else /* exists, so we overwrite */
{
lsp_update(lsp, &hdr, tlvs, circuit->rcv_stream,
@ -1417,7 +1416,7 @@ static int process_snp(uint8_t pdu_type, struct isis_circuit *circuit,
for (struct isis_lsp_entry *entry = entry_head; entry;
entry = entry->next) {
struct isis_lsp *lsp =
lsp_search(entry->id, circuit->area->lspdb[level - 1]);
lsp_search(&circuit->area->lspdb[level - 1], entry->id);
bool own_lsp = !memcmp(entry->id, isis->sysid, ISIS_SYS_ID_LEN);
if (lsp) {
/* 7.3.15.2 b) 1) is this LSP newer */
@ -1468,8 +1467,8 @@ static int process_snp(uint8_t pdu_type, struct isis_circuit *circuit,
ISIS_SYS_ID_LEN + 1);
LSP_FRAGMENT(lspid) = 0;
lsp0 = lsp_search(
lspid,
circuit->area->lspdb[level - 1]);
&circuit->area->lspdb[level - 1],
lspid);
if (!lsp0) {
zlog_debug("Got lsp frag in snp, while zero not in database");
continue;
@ -1478,8 +1477,8 @@ static int process_snp(uint8_t pdu_type, struct isis_circuit *circuit,
lsp = lsp_new(circuit->area, entry->id,
entry->rem_lifetime, 0, 0,
entry->checksum, lsp0, level);
lsp_insert(lsp,
circuit->area->lspdb[level - 1]);
lsp_insert(&circuit->area->lspdb[level - 1],
lsp);

lsp_set_all_srmflags(lsp, false);
ISIS_SET_FLAG(lsp->SSNflags, circuit);
@ -1496,8 +1495,8 @@ static int process_snp(uint8_t pdu_type, struct isis_circuit *circuit,
* start_lsp_id and stop_lsp_id
*/
struct list *lsp_list = list_new();
lsp_build_list_nonzero_ht(start_lsp_id, stop_lsp_id, lsp_list,
circuit->area->lspdb[level - 1]);
lsp_build_list_nonzero_ht(&circuit->area->lspdb[level - 1],
start_lsp_id, stop_lsp_id, lsp_list);

/* Fixme: Find a better solution */
struct listnode *node, *nnode;
@ -2041,8 +2040,7 @@ static uint16_t get_max_lsp_count(uint16_t size)

int send_csnp(struct isis_circuit *circuit, int level)
{
if (circuit->area->lspdb[level - 1] == NULL
|| dict_count(circuit->area->lspdb[level - 1]) == 0)
if (lspdb_count(&circuit->area->lspdb[level - 1]) == 0)
return ISIS_OK;

uint8_t pdu_type = (level == ISIS_LEVEL1) ? L1_COMPLETE_SEQ_NUM
@ -2095,7 +2093,7 @@ int send_csnp(struct isis_circuit *circuit, int level)

struct isis_lsp *last_lsp;
isis_tlvs_add_csnp_entries(tlvs, start, stop, num_lsps,
circuit->area->lspdb[level - 1],
&circuit->area->lspdb[level - 1],
&last_lsp);
/*
* Update the stop lsp_id before encoding this CSNP.
@ -2216,8 +2214,7 @@ static int send_psnp(int level, struct isis_circuit *circuit)
&& circuit->u.bc.is_dr[level - 1])
return ISIS_OK;

if (circuit->area->lspdb[level - 1] == NULL
|| dict_count(circuit->area->lspdb[level - 1]) == 0)
if (lspdb_count(&circuit->area->lspdb[level - 1]) == 0)
return ISIS_OK;

if (!circuit->snd_stream)
@ -2255,16 +2252,13 @@ static int send_psnp(int level, struct isis_circuit *circuit)
get_max_lsp_count(STREAM_WRITEABLE(circuit->snd_stream));

while (1) {
struct isis_lsp *lsp;

tlvs = isis_alloc_tlvs();
if (CHECK_FLAG(passwd->snp_auth, SNP_AUTH_SEND))
isis_tlvs_add_auth(tlvs, passwd);

for (dnode_t *dnode =
dict_first(circuit->area->lspdb[level - 1]);
dnode; dnode = dict_next(circuit->area->lspdb[level - 1],
dnode)) {
struct isis_lsp *lsp = dnode_get(dnode);

for_each (lspdb, &circuit->area->lspdb[level - 1], lsp) {
if (ISIS_CHECK_FLAG(lsp->SSNflags, circuit))
isis_tlvs_add_lsp_entry(tlvs, lsp);


@ -33,7 +33,6 @@
#include "if.h"
#include "lib_errors.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_circuit.h"

@ -32,7 +32,6 @@
#include "vty.h"
#include "srcdest_table.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"

@ -38,7 +38,6 @@
#include "isis_constants.h"
#include "isis_common.h"
#include "isis_flags.h"
#include "dict.h"
#include "isisd.h"
#include "isis_misc.h"
#include "isis_adjacency.h"

@ -37,7 +37,6 @@
#include "isis_constants.h"
#include "isis_common.h"
#include "isis_flags.h"
#include "dict.h"
#include "isisd.h"
#include "isis_misc.h"
#include "isis_adjacency.h"

@ -39,7 +39,6 @@
#include "isis_constants.h"
#include "isis_common.h"
#include "isis_flags.h"
#include "dict.h"
#include "isisd.h"
#include "isis_misc.h"
#include "isis_adjacency.h"
@ -313,7 +312,7 @@ static struct isis_lsp *isis_root_system_lsp(struct isis_area *area, int level,
memcpy(lspid, sysid, ISIS_SYS_ID_LEN);
LSP_PSEUDO_ID(lspid) = 0;
LSP_FRAGMENT(lspid) = 0;
lsp = lsp_search(lspid, area->lspdb[level - 1]);
lsp = lsp_search(&area->lspdb[level - 1], lspid);
if (lsp && lsp->hdr.rem_lifetime != 0)
return lsp;
return NULL;
@ -870,10 +869,8 @@ static int isis_spf_preload_tent(struct isis_spftree *spftree,
[spftree->level - 1],
parent);
lsp = lsp_search(
lsp_id,
spftree->area
->lspdb[spftree->level
- 1]);
&spftree->area->lspdb[spftree->level - 1],
lsp_id);
if (lsp == NULL
|| lsp->hdr.rem_lifetime == 0)
zlog_warn(
@ -923,8 +920,8 @@ static int isis_spf_preload_tent(struct isis_spftree *spftree,
continue;
}
lsp = lsp_search(
lsp_id,
spftree->area->lspdb[spftree->level - 1]);
&spftree->area->lspdb[spftree->level - 1],
lsp_id);
if (lsp == NULL || lsp->hdr.rem_lifetime == 0) {
zlog_warn(
"ISIS-Spf: No lsp (%p) found from root "

@ -347,8 +347,8 @@ static struct isis_lsp *lsp_for_vertex(struct isis_spftree *spftree,
memcpy(lsp_id, vertex->N.id, ISIS_SYS_ID_LEN + 1);
LSP_FRAGMENT(lsp_id) = 0;

dict_t *lspdb = spftree->area->lspdb[spftree->level - 1];
struct isis_lsp *lsp = lsp_search(lsp_id, lspdb);
struct lspdb_head *lspdb = &spftree->area->lspdb[spftree->level - 1];
struct isis_lsp *lsp = lsp_search(lspdb, lsp_id);

if (lsp && lsp->hdr.rem_lifetime != 0)
return lsp;

@ -43,7 +43,6 @@
#include "network.h"
#include "sbuf.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"

@ -3522,26 +3522,24 @@ void isis_tlvs_add_lsp_entry(struct isis_tlvs *tlvs, struct isis_lsp *lsp)

void isis_tlvs_add_csnp_entries(struct isis_tlvs *tlvs, uint8_t *start_id,
uint8_t *stop_id, uint16_t num_lsps,
dict_t *lspdb, struct isis_lsp **last_lsp)
struct lspdb_head *head,
struct isis_lsp **last_lsp)
{
dnode_t *first = dict_lower_bound(lspdb, start_id);
struct isis_lsp searchfor;
struct isis_lsp *first, *lsp;

memcpy(&searchfor.hdr.lsp_id, start_id, sizeof(searchfor.hdr.lsp_id));
first = lspdb_find_gteq(head, &searchfor);
if (!first)
return;

dnode_t *last = dict_upper_bound(lspdb, stop_id);
dnode_t *curr = first;

isis_tlvs_add_lsp_entry(tlvs, first->dict_data);
*last_lsp = first->dict_data;

while (curr) {
curr = dict_next(lspdb, curr);
if (curr) {
isis_tlvs_add_lsp_entry(tlvs, curr->dict_data);
*last_lsp = curr->dict_data;
}
if (curr == last || tlvs->lsp_entries.count == num_lsps)
for_each_from (lspdb, head, lsp, first) {
if (memcmp(lsp->hdr.lsp_id, stop_id, sizeof(lsp->hdr.lsp_id))
> 0 || tlvs->lsp_entries.count == num_lsps)
break;

isis_tlvs_add_lsp_entry(tlvs, lsp);
*last_lsp = lsp;
}
}


@ -25,8 +25,8 @@

#include "openbsd-tree.h"
#include "prefix.h"
#include "isisd/dict.h"

struct lspdb_head;
struct isis_subtlvs;

struct isis_area_address;
@ -355,7 +355,8 @@ bool isis_tlvs_own_snpa_found(struct isis_tlvs *tlvs, uint8_t *snpa);
void isis_tlvs_add_lsp_entry(struct isis_tlvs *tlvs, struct isis_lsp *lsp);
void isis_tlvs_add_csnp_entries(struct isis_tlvs *tlvs, uint8_t *start_id,
uint8_t *stop_id, uint16_t num_lsps,
dict_t *lspdb, struct isis_lsp **last_lsp);
struct lspdb_head *lspdb,
struct isis_lsp **last_lsp);
void isis_tlvs_set_dynamic_hostname(struct isis_tlvs *tlvs,
const char *hostname);
void isis_tlvs_set_te_router_id(struct isis_tlvs *tlvs,

@ -27,7 +27,6 @@
#include "isisd/isisd.h"
#include "isisd/isis_memory.h"
#include "isisd/isis_flags.h"
#include "dict.h"
#include "isisd/isis_circuit.h"
#include "isisd/isis_lsp.h"
#include "isisd/isis_misc.h"

@ -161,13 +161,14 @@ DEFUN (show_lsp_flooding,
struct isis_area *area;

for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) {
dict_t *lspdb = area->lspdb[ISIS_LEVEL2 - 1];
struct lspdb_head *head = &area->lspdb[ISIS_LEVEL2 - 1];
struct isis_lsp *lsp;

vty_out(vty, "Area %s:\n", area->area_tag ?
area->area_tag : "null");

if (lspid) {
struct isis_lsp *lsp = lsp_for_arg(lspid, lspdb);
struct isis_lsp *lsp = lsp_for_arg(head, lspid);

if (lsp)
lsp_print_flooding(vty, lsp);
@ -175,9 +176,8 @@ DEFUN (show_lsp_flooding,
continue;
}

for (dnode_t *dnode = dict_first(lspdb); dnode;
dnode = dict_next(lspdb, dnode)) {
lsp_print_flooding(vty, dnode_get(dnode));
for_each (lspdb, head, lsp) {
lsp_print_flooding(vty, lsp);
vty_out(vty, "\n");
}
}

@ -37,7 +37,6 @@
#include "vrf.h"
#include "libfrr.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"

@ -38,7 +38,6 @@
#include "spf_backoff.h"
#include "lib/northbound_cli.h"

#include "isisd/dict.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
#include "isisd/isis_flags.h"
@ -121,12 +120,10 @@ struct isis_area *isis_area_create(const char *area_tag)
/*
* initialize the databases
*/
if (area->is_type & IS_LEVEL_1) {
area->lspdb[0] = lsp_db_init();
}
if (area->is_type & IS_LEVEL_2) {
area->lspdb[1] = lsp_db_init();
}
if (area->is_type & IS_LEVEL_1)
lsp_db_init(&area->lspdb[0]);
if (area->is_type & IS_LEVEL_2)
lsp_db_init(&area->lspdb[1]);

spftree_area_init(area);

@ -271,14 +268,8 @@ int isis_area_destroy(const char *area_tag)
list_delete(&area->circuit_list);
}

if (area->lspdb[0] != NULL) {
lsp_db_destroy(area->lspdb[0]);
area->lspdb[0] = NULL;
}
if (area->lspdb[1] != NULL) {
lsp_db_destroy(area->lspdb[1]);
area->lspdb[1] = NULL;
}
lsp_db_fini(&area->lspdb[0]);
lsp_db_fini(&area->lspdb[1]);

/* invalidate and verify to delete all routes from zebra */
isis_area_invalidate_routes(area, ISIS_LEVEL1 & ISIS_LEVEL2);
@ -1344,7 +1335,7 @@ DEFUN (show_isis_summary,
return CMD_SUCCESS;
}

struct isis_lsp *lsp_for_arg(const char *argv, dict_t *lspdb)
struct isis_lsp *lsp_for_arg(struct lspdb_head *head, const char *argv)
{
char sysid[255] = {0};
uint8_t number[3];
@ -1392,13 +1383,13 @@ struct isis_lsp *lsp_for_arg(const char *argv, dict_t *lspdb)
* hostname.<pseudo-id>-<fragment>
*/
if (sysid2buff(lspid, sysid)) {
lsp = lsp_search(lspid, lspdb);
lsp = lsp_search(head, lspid);
} else if ((dynhn = dynhn_find_by_name(sysid))) {
memcpy(lspid, dynhn->id, ISIS_SYS_ID_LEN);
lsp = lsp_search(lspid, lspdb);
lsp = lsp_search(head, lspid);
} else if (strncmp(cmd_hostname_get(), sysid, 15) == 0) {
memcpy(lspid, isis->sysid, ISIS_SYS_ID_LEN);
lsp = lsp_search(lspid, lspdb);
lsp = lsp_search(head, lspid);
}

return lsp;
@ -1435,9 +1426,8 @@ static int show_isis_database(struct vty *vty, const char *argv, int ui_level)
area->area_tag ? area->area_tag : "null");

for (level = 0; level < ISIS_LEVELS; level++) {
if (area->lspdb[level]
&& dict_count(area->lspdb[level]) > 0) {
lsp = lsp_for_arg(argv, area->lspdb[level]);
if (lspdb_count(&area->lspdb[level]) > 0) {
lsp = lsp_for_arg(&area->lspdb[level], argv);

if (lsp != NULL || argv == NULL) {
vty_out(vty,
@ -1459,7 +1449,7 @@ static int show_isis_database(struct vty *vty, const char *argv, int ui_level)
area->dynhostname);
} else if (argv == NULL) {
lsp_count = lsp_print_all(
vty, area->lspdb[level],
vty, &area->lspdb[level],
ui_level, area->dynhostname);

vty_out(vty, " %u LSPs\n\n",
@ -1699,10 +1689,7 @@ static void area_resign_level(struct isis_area *area, int level)
isis_area_invalidate_routes(area, level);
isis_area_verify_routes(area);

if (area->lspdb[level - 1]) {
lsp_db_destroy(area->lspdb[level - 1]);
area->lspdb[level - 1] = NULL;
}
lsp_db_fini(&area->lspdb[level - 1]);

for (int tree = SPFTREE_IPV4; tree < SPFTREE_COUNT; tree++) {
if (area->spftree[tree][level - 1]) {
@ -1738,8 +1725,7 @@ void isis_area_is_type_set(struct isis_area *area, int is_type)
if (is_type == IS_LEVEL_2)
area_resign_level(area, IS_LEVEL_1);

if (area->lspdb[1] == NULL)
area->lspdb[1] = lsp_db_init();
lsp_db_init(&area->lspdb[1]);
break;

case IS_LEVEL_1_AND_2:
@ -1753,8 +1739,7 @@ void isis_area_is_type_set(struct isis_area *area, int is_type)
if (is_type == IS_LEVEL_1)
area_resign_level(area, IS_LEVEL_2);

if (area->lspdb[0] == NULL)
area->lspdb[0] = lsp_db_init();
lsp_db_init(&area->lspdb[0]);
break;

default:

@ -31,7 +31,7 @@
#include "isisd/isis_pdu_counter.h"
#include "isisd/isis_circuit.h"
#include "isis_flags.h"
#include "dict.h"
#include "isis_lsp.h"
#include "isis_memory.h"
#include "qobj.h"

@ -107,7 +107,7 @@ enum isis_metric_style {

struct isis_area {
struct isis *isis; /* back pointer */
dict_t *lspdb[ISIS_LEVELS]; /* link-state dbs */
struct lspdb_head lspdb[ISIS_LEVELS]; /* link-state dbs */
struct isis_spftree *spftree[SPFTREE_COUNT][ISIS_LEVELS];
#define DEFAULT_LSP_MTU 1497
unsigned int lsp_mtu; /* Size of LSPs to generate */
@ -197,7 +197,7 @@ struct isis_area *isis_area_lookup(const char *);
int isis_area_get(struct vty *vty, const char *area_tag);
int isis_area_destroy(const char *area_tag);
void print_debug(struct vty *, int, int);
struct isis_lsp *lsp_for_arg(const char *argv, dict_t *lspdb);
struct isis_lsp *lsp_for_arg(struct lspdb_head *head, const char *argv);

void isis_area_invalidate_routes(struct isis_area *area, int levels);
void isis_area_verify_routes(struct isis_area *area);

@ -25,7 +25,6 @@ dist_examples_DATA += isisd/fabricd.conf.sample
endif

noinst_HEADERS += \
isisd/dict.h \
isisd/isis_adjacency.h \
isisd/isis_bfd.h \
isisd/isis_circuit.h \
@ -61,7 +60,6 @@ noinst_HEADERS += \
# end

LIBISIS_SOURCES = \
isisd/dict.c \
isisd/isis_adjacency.c \
isisd/isis_bfd.c \
isisd/isis_circuit.c \
348
lib/atomlist.c
Normal file
@ -0,0 +1,348 @@
/*
* Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "atomlist.h"

void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
atomptr_t prevval;
atomptr_t i = atomptr_i(item);

atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

/* updating ->last is possible here, but makes the code considerably
* more complicated... let's not.
*/
prevval = ATOMPTR_NULL;
item->next = ATOMPTR_NULL;

/* head-insert atomically
* release barrier: item + item->next writes must be completed
*/
while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
memory_order_release, memory_order_relaxed))
atomic_store_explicit(&item->next, prevval,
memory_order_relaxed);
}

void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
atomptr_t prevval = ATOMPTR_NULL;
atomptr_t i = atomptr_i(item);
atomptr_t hint;
struct atomlist_item *prevptr;
_Atomic atomptr_t *prev;

item->next = ATOMPTR_NULL;

atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

/* place new item into ->last
* release: item writes completed; acquire: DD barrier on hint
*/
hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

while (1) {
if (atomptr_p(hint) == NULL)
prev = &h->first;
else
prev = &atomlist_itemp(hint)->next;

do {
prevval = atomic_load_explicit(prev,
memory_order_consume);
prevptr = atomlist_itemp(prevval);
if (prevptr == NULL)
break;

prev = &prevptr->next;
} while (prevptr);

/* last item is being deleted - start over */
if (atomptr_l(prevval)) {
hint = ATOMPTR_NULL;
continue;
}

/* no barrier - item->next is NULL and was so in xchg above */
if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
memory_order_consume,
memory_order_consume)) {
hint = prevval;
continue;
}
break;
}
}

static void atomlist_del_core(struct atomlist_head *h,
struct atomlist_item *item,
_Atomic atomptr_t *hint,
atomptr_t next)
{
_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
atomptr_t prevval, updval;
struct atomlist_item *prevptr;

/* drop us off "last" if needed. no r/w to barrier. */
prevval = atomptr_i(item);
atomic_compare_exchange_strong_explicit(&h->last, &prevval,
ATOMPTR_NULL,
memory_order_relaxed, memory_order_relaxed);

atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

/* the following code should be identical (except sort<>list) to
* atomsort_del_hint()
*/
while (1) {
upd = NULL;
updval = ATOMPTR_LOCK;

do {
prevval = atomic_load_explicit(prev,
memory_order_consume);

/* track the beginning of a chain of deleted items
* this is necessary to make this lock-free; we can
* complete deletions started by other threads.
*/
if (!atomptr_l(prevval)) {
updval = prevval;
upd = prev;
}

prevptr = atomlist_itemp(prevval);
if (prevptr == item)
break;

prev = &prevptr->next;
} while (prevptr);

if (prevptr != item)
/* another thread completed our deletion */
return;

if (!upd || atomptr_l(updval)) {
/* failed to find non-deleted predecessor...
* have to try again
*/
prev = &h->first;
continue;
}

if (!atomic_compare_exchange_strong_explicit(upd, &updval,
next, memory_order_consume,
memory_order_consume)) {
/* prev doesn't point to item anymore, something
* was inserted. continue at same position forward.
*/
continue;
}
break;
}
}

void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
_Atomic atomptr_t *hint)
{
atomptr_t next;

/* mark ourselves in-delete - full barrier */
next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
memory_order_acquire);
assert(!atomptr_l(next)); /* delete race on same item */

atomlist_del_core(h, item, hint, next);
}

struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
struct atomlist_item *item;
atomptr_t next;

/* grab head of the list - and remember it in replval for the
* actual delete below. No matter what, the head of the list is
* where we start deleting because either it's our item, or it's
* some delete-marked items and then our item.
*/
next = atomic_load_explicit(&h->first, memory_order_consume);

do {
item = atomlist_itemp(next);
if (!item)
return NULL;

/* try to mark deletion */
next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
memory_order_acquire);

} while (atomptr_l(next));
/* if loop is taken: delete race on same item (another pop or del)
* => proceed to next item
* if loop exited here: we have our item selected and marked
*/
atomlist_del_core(h, item, &h->first, next);
return item;
}

struct atomsort_item *atomsort_add(struct atomsort_head *h,
struct atomsort_item *item, int (*cmpfn)(
const struct atomsort_item *,
const struct atomsort_item *))
{
_Atomic atomptr_t *prev;
atomptr_t prevval;
atomptr_t i = atomptr_i(item);
struct atomsort_item *previtem;
int cmpval;

do {
prev = &h->first;

do {
prevval = atomic_load_explicit(prev,
memory_order_acquire);
previtem = atomptr_p(prevval);

if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
break;
if (cmpval == 0)
return previtem;

prev = &previtem->next;
} while (1);

if (atomptr_l(prevval))
continue;

item->next = prevval;
if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
memory_order_release, memory_order_relaxed))
break;
} while (1);

atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
return NULL;
}

static void atomsort_del_core(struct atomsort_head *h,
struct atomsort_item *item, _Atomic atomptr_t *hint,
atomptr_t next)
{
_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
atomptr_t prevval, updval;
struct atomsort_item *prevptr;

atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

/* the following code should be identical (except sort<>list) to
* atomlist_del_core()
*/
while (1) {
upd = NULL;
updval = ATOMPTR_LOCK;

do {
prevval = atomic_load_explicit(prev,
memory_order_consume);

/* track the beginning of a chain of deleted items
* this is necessary to make this lock-free; we can
* complete deletions started by other threads.
*/
if (!atomptr_l(prevval)) {
updval = prevval;
upd = prev;
}

prevptr = atomsort_itemp(prevval);
if (prevptr == item)
break;

prev = &prevptr->next;
} while (prevptr);

if (prevptr != item)
/* another thread completed our deletion */
return;

if (!upd || atomptr_l(updval)) {
/* failed to find non-deleted predecessor...
* have to try again
*/
prev = &h->first;
continue;
}

if (!atomic_compare_exchange_strong_explicit(upd, &updval,
next, memory_order_relaxed,
memory_order_relaxed)) {
/* prev doesn't point to item anymore, something
* was inserted. continue at same position forward.
*/
continue;
}
break;
}
}

void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
_Atomic atomptr_t *hint)
{
atomptr_t next;

/* mark ourselves in-delete - full barrier */
next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
memory_order_seq_cst);
assert(!atomptr_l(next)); /* delete race on same item */

atomsort_del_core(h, item, hint, next);
}

struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
struct atomsort_item *item;
atomptr_t next;

/* grab head of the list - and remember it in replval for the
* actual delete below. No matter what, the head of the list is
* where we start deleting because either it's our item, or it's
* some delete-marked items and then our item.
*/
next = atomic_load_explicit(&h->first, memory_order_consume);

do {
item = atomsort_itemp(next);
if (!item)
return NULL;

/* try to mark deletion */
next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
memory_order_acquire);

} while (atomptr_l(next));
/* if loop is taken: delete race on same item (another pop or del)
* => proceed to next item
* if loop exited here: we have our item selected and marked
*/
atomsort_del_core(h, item, &h->first, next);
return item;
}
347
lib/atomlist.h
Normal file
@ -0,0 +1,347 @@
/*
* Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#ifndef _FRR_ATOMLIST_H
#define _FRR_ATOMLIST_H

#include "typesafe.h"
#include "frratomic.h"

/* pointer with lock/deleted/invalid bit in lowest bit
*
* for atomlist/atomsort, "locked" means "this pointer can't be updated, the
* item is being deleted". it is permissible to assume the item will indeed
* be deleted (as there are no replace/etc. ops in this).
*
* in general, lowest 2/3 bits on 32/64bit architectures are available for
* uses like this; the only thing that will really break this is putting an
* atomlist_item in a struct with "packed" attribute. (it'll break
* immediately and consistently.) -- don't do that.
*
* ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
* implementations.)
*/
typedef uintptr_t atomptr_t;
#define ATOMPTR_MASK (UINTPTR_MAX - 3)
#define ATOMPTR_LOCK (1)
#define ATOMPTR_USER (2)
#define ATOMPTR_NULL (0)

static inline atomptr_t atomptr_i(void *val)
{
atomptr_t atomval = (atomptr_t)val;

assert(!(atomval & ATOMPTR_LOCK));
return atomval;
}
static inline void *atomptr_p(atomptr_t val)
{
return (void *)(val & ATOMPTR_MASK);
}
static inline bool atomptr_l(atomptr_t val)
{
return (bool)(val & ATOMPTR_LOCK);
}
static inline bool atomptr_u(atomptr_t val)
{
return (bool)(val & ATOMPTR_USER);
}
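These helpers implement the tagging scheme described in the comment above: because the item structs hold pointer-sized members, their addresses are at least 4-byte aligned, so the low two bits are guaranteed zero and can carry the LOCK/USER flags. A hedged illustration of the round trip (not code from the patch):

	struct atomlist_item it;
	atomptr_t a = atomptr_i(&it);        /* plain pointer, flags clear */
	atomptr_t locked = a | ATOMPTR_LOCK; /* mark as in-deletion */

	assert(atomptr_p(locked) == (void *)&it); /* mask recovers pointer */
	assert(atomptr_l(locked));                /* flag remains visible */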


/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
* they're neither an "acquire" nor a "release" operation; the element that
* was found is still on the list and doesn't change ownership. Therefore,
* an atomic transition in ownership state can't be implemented.
*
* Contrast this with add() or pop(): both function calls atomically transfer
* ownership of an item to or from the list, which makes them "acquire" /
* "release" operations.
*
* What can be implemented atomically is a "find_pop()", i.e. try to locate an
* item and atomically try to remove it if found. It's not currently
* implemented but can be added when needed.
*
* Either way - for find(), generally speaking, if you need to use find() on
* a list then the whole thing probably isn't well-suited to atomic
* implementation and you'll need to have extra locks around to make it work
* correctly.
*/
#ifdef WNO_ATOMLIST_UNSAFE_FIND
# define atomic_find_warn
#else
# define atomic_find_warn __attribute__((_DEPRECATED( \
"WARNING: find() on atomic lists cannot be atomic by principle; " \
"check code to make sure usage pattern is OK and if it is, use " \
"#define WNO_ATOMLIST_UNSAFE_FIND")))
#endif


/* single-linked list, unsorted/arbitrary.
* can be used as queue with add_tail / pop
*
* all operations are lock-free, but not necessarily wait-free. this means
* that there is no state where the system as a whole stops making progress,
* but it *is* possible that a *particular* thread is delayed by some time.
*
* the only way for this to happen is for other threads to continuously make
* updates. an inactive / blocked / deadlocked other thread cannot cause such
* delays, and to cause such delays a thread must be heavily hitting the list -
* it's a rather theoretical concern.
*/

/* don't use these structs directly */
struct atomlist_item {
_Atomic atomptr_t next;
};
#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))

struct atomlist_head {
_Atomic atomptr_t first, last;
_Atomic size_t count;
};

/* use as:
*
* PREDECL_ATOMLIST(namelist)
* struct name {
* struct namelist_item nlitem;
* }
* DECLARE_ATOMLIST(namelist, struct name, nlitem)
*/
#define PREDECL_ATOMLIST(prefix) \
struct prefix ## _head { struct atomlist_head ah; }; \
struct prefix ## _item { struct atomlist_item ai; };

#define INIT_ATOMLIST(var) { }

#define DECLARE_ATOMLIST(prefix, type, field) \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ atomlist_add_head(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ atomlist_add_tail(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
_Atomic atomptr_t *hint) \
{ atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ atomlist_del_hint(&h->ah, &item->field.ai, NULL); } \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ char *p = (char *)atomlist_pop(&h->ah); \
return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
memory_order_acquire)); \
return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
memory_order_acquire)); \
return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ return item ? prefix##_next(h, item) : NULL; } \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
/* ... */

/* add_head:
* - contention on ->first pointer
* - return implies completion
*/
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);

/* add_tail:
* - concurrent add_tail can cause wait but has progress guarantee
* - return does NOT imply completion. completion is only guaranteed after
* all other add_tail operations that started before this add_tail have
* completed as well.
*/
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);

/* del/del_hint:
*
* OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
* WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
*
* as with all deletions, threads that started reading earlier may still hold
* pointers to the deleted item. completion is however guaranteed for all
* reads starting later.
*/
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
_Atomic atomptr_t *hint);

/* pop:
*
* as with all deletions, threads that started reading earlier may still hold
* pointers to the deleted item. completion is however guaranteed for all
* reads starting later.
*/
struct atomlist_item *atomlist_pop(struct atomlist_head *h);
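To make the macro API concrete, here is a hedged usage sketch of an atomlist as a multi-producer work queue (the type and function names are invented for the example, and the error handling a real daemon would want is omitted):

	#include "lib/atomlist.h"

	PREDECL_ATOMLIST(workqueue)

	struct work {
		struct workqueue_item wqi; /* embedded list node */
		void (*fn)(struct work *);
	};

	DECLARE_ATOMLIST(workqueue, struct work, wqi)

	static struct workqueue_head wq_head;

	/* any number of threads may enqueue concurrently, lock-free */
	static void enqueue(struct work *w)
	{
		workqueue_add_tail(&wq_head, w);
	}

	/* pop() transfers ownership atomically, so each item is
	 * dequeued by exactly one consumer */
	static void drain(void)
	{
		struct work *w;

		while ((w = workqueue_pop(&wq_head)))
			w->fn(w);
	}

Note the contrast with find() spelled out above: add_tail() and pop() are the acquire/release pair that makes this ownership handoff safe without locks.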


struct atomsort_item {
_Atomic atomptr_t next;
};
#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))

struct atomsort_head {
_Atomic atomptr_t first;
_Atomic size_t count;
};

#define _PREDECL_ATOMSORT(prefix) \
struct prefix ## _head { struct atomsort_head ah; }; \
struct prefix ## _item { struct atomsort_item ai; };

#define INIT_ATOMSORT_UNIQ(var) { }
#define INIT_ATOMSORT_NONUNIQ(var) { }

#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
assert(h->ah.count == 0); \
memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
struct atomsort_item *p; \
p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ \
struct atomsort_item *p; \
p = atomptr_p(atomic_load_explicit(&h->ah.first, \
memory_order_acquire)); \
return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
struct atomsort_item *p; \
p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
memory_order_acquire)); \
return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
return item ? prefix##_next(h, item) : NULL; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
const type *item) \
{ \
type *p = prefix ## _first(h); \
while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
p = prefix ## _next(h, p); \
return p; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
const type *item) \
{ \
type *p = prefix ## _first(h), *prev = NULL; \
while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
p = prefix ## _next(h, (prev = p)); \
return prev; \
} \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
_Atomic atomptr_t *hint) \
{ \
atomsort_del_hint(&h->ah, &item->field.ai, hint); \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
} \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ \
return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
struct atomsort_item *p = atomsort_pop(&h->ah); \
return p ? container_of(p, type, field.ai) : NULL; \
} \
/* ... */

#define PREDECL_ATOMSORT_UNIQ(prefix) \
_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
const struct atomsort_item *b) \
{ \
return cmpfn(container_of(a, type, field.ai), \
container_of(b, type, field.ai)); \
} \
\
_DECLARE_ATOMSORT(prefix, type, field, \
prefix ## __cmp, prefix ## __cmp) \
\
atomic_find_warn \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
type *p = prefix ## _first(h); \
int cmpval = 0; \
while (p && (cmpval = cmpfn(p, item)) < 0) \
p = prefix ## _next(h, p); \
if (!p || cmpval > 0) \
return NULL; \
return p; \
} \
/* ... */

#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
const struct atomsort_item *b) \
{ \
return cmpfn(container_of(a, type, field.ai), \
container_of(b, type, field.ai)); \
} \
macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
const struct atomsort_item *b) \
{ \
int cmpval = cmpfn(container_of(a, type, field.ai), \
container_of(b, type, field.ai)); \
if (cmpval) \
return cmpval; \
if (a < b) \
return -1; \
if (a > b) \
return 1; \
return 0; \
} \
\
_DECLARE_ATOMSORT(prefix, type, field, \
prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */
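The NONUNIQ variant deserves a remark: _add() internally uses cmpfn_uq, and the generated __cmp_uq falls back to comparing item addresses when the user comparator reports equality. Equal-keyed items are therefore all kept (in an arbitrary but stable order) instead of being rejected as duplicates, which atomsort_add() would otherwise signal by returning the existing item. A brief hedged illustration (names invented for the example):

	PREDECL_ATOMSORT_NONUNIQ(evlist)

	struct event {
		struct evlist_item evi;
		int prio;
	};

	static int event_cmp(const struct event *a, const struct event *b)
	{
		return a->prio - b->prio;
	}

	DECLARE_ATOMSORT_NONUNIQ(evlist, struct event, evi, event_cmp)

	/* two events with equal prio both stay on the list; with the
	 * _UNIQ variant, evlist_add() would instead return the
	 * already-present equal item and leave the new one out. */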
|
||||
|
||||
struct atomsort_item *atomsort_add(struct atomsort_head *h,
|
||||
struct atomsort_item *item, int (*cmpfn)(
|
||||
const struct atomsort_item *,
|
||||
const struct atomsort_item *));
|
||||
|
||||
void atomsort_del_hint(struct atomsort_head *h,
|
||||
struct atomsort_item *item, _Atomic atomptr_t *hint);
|
||||
|
||||
struct atomsort_item *atomsort_pop(struct atomsort_head *h);
|
||||
|
||||
#endif /* _FRR_ATOMLIST_H */
|
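For orientation, a minimal usage sketch of the ATOMSORT API above (not part of the commit). It assumes that _PREDECL_ATOMSORT, which is outside this hunk, emits a per-prefix head struct and a `struct prefix##_item` anchor wrapping `struct atomsort_item ai`, matching how the other typesafe containers in this series are pre-declared; it also assumes an `_add` operation is among the members elided as /* ... */, so insertion is not shown. `struct config_entry`, `cfglist`, and their fields are hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include "atomlist.h"

PREDECL_ATOMSORT_UNIQ(cfglist)

struct config_entry {
	int seq;
	struct cfglist_item itm;	/* embedded anchor (assumed shape) */
};

static int config_entry_cmp(const struct config_entry *a,
			    const struct config_entry *b)
{
	return a->seq - b->seq;
}

DECLARE_ATOMSORT_UNIQ(cfglist, struct config_entry, itm, config_entry_cmp)

/* readers can walk and drain the sorted list without holding a lock */
static void dump_and_drain(struct cfglist_head *head)
{
	struct config_entry *ce;

	for (ce = cfglist_first(head); ce; ce = cfglist_next(head, ce))
		printf("seq %d (of %zu)\n", ce->seq, cfglist_count(head));
	while ((ce = cfglist_pop(head)))
		free(ce);
}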
@ -32,6 +32,7 @@ extern "C" {
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
# define _CONSTRUCTOR(x) constructor(x)
# define _DEPRECATED(x) deprecated(x)
#elif defined(__GNUC__)
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
# define _RET_NONNULL , returns_nonnull
@ -41,6 +42,9 @@ extern "C" {
# define _DESTRUCTOR(x) destructor(x)
# define _ALLOC_SIZE(x) alloc_size(x)
#endif
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
# define _DEPRECATED(x) deprecated(x)
#endif
#if __GNUC__ >= 7
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
@ -68,6 +72,13 @@ extern "C" {
#ifndef _FALLTHROUGH
#define _FALLTHROUGH
#endif
#ifndef _DEPRECATED
#define _DEPRECATED(x) deprecated
#endif

/* for helper functions defined inside macros */
#define macro_inline static inline __attribute__((unused))
#define macro_pure static inline __attribute__((unused, pure))

/*
 * for warnings on macros, put in the macro content like this:
@ -92,6 +103,80 @@ extern "C" {
#define CPP_NOTICE(text)
#endif

/* MAX / MIN are not commonly defined, but useful */
/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */
#ifdef MAX
#undef MAX
#endif
#define MAX(a, b) \
	({ \
		typeof(a) _max_a = (a); \
		typeof(b) _max_b = (b); \
		_max_a > _max_b ? _max_a : _max_b; \
	})
#ifdef MIN
#undef MIN
#endif
#define MIN(a, b) \
	({ \
		typeof(a) _min_a = (a); \
		typeof(b) _min_b = (b); \
		_min_a < _min_b ? _min_a : _min_b; \
	})

#ifndef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE,MEMBER)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#endif

/* this variant of container_of() retains 'const' on pointers without needing
 * to be told to do so.  The following will all work without warning:
 *
 * struct member *p;
 * const struct member *cp;
 *
 * const struct cont *x = container_of(cp, struct cont, member);
 * const struct cont *x = container_of(cp, const struct cont, member);
 * const struct cont *x = container_of(p, struct cont, member);
 * const struct cont *x = container_of(p, const struct cont, member);
 * struct cont *x = container_of(p, struct cont, member);
 *
 * but the following will generate warnings about stripping const:
 *
 * struct cont *x = container_of(cp, struct cont, member);
 * struct cont *x = container_of(cp, const struct cont, member);
 * struct cont *x = container_of(p, const struct cont, member);
 */
#ifdef container_of
#undef container_of
#endif
#define container_of(ptr, type, member) \
	(__builtin_choose_expr( \
		__builtin_types_compatible_p(typeof(&((type *)0)->member), \
			typeof(ptr)) \
		|| __builtin_types_compatible_p(void *, typeof(ptr)), \
		({ \
			typeof(((type *)0)->member) *__mptr = (void *)(ptr); \
			(type *)((char *)__mptr - offsetof(type, member)); \
		}), \
		({ \
			typeof(((const type *)0)->member) *__mptr = (ptr); \
			(const type *)((const char *)__mptr - \
					offsetof(type, member)); \
		}) \
	))

#define container_of_null(ptr, type, member) \
	({ \
		typeof(ptr) _tmp = (ptr); \
		_tmp ? container_of(_tmp, type, member) : NULL; \
	})

#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))

#ifdef __cplusplus
}
#endif

66	lib/fifo.h
@ -1,66 +0,0 @@
/* FIFO common header.
 * Copyright (C) 2015 Kunihiro Ishiguro
 *
 * This file is part of Quagga.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __LIB_FIFO_H__
#define __LIB_FIFO_H__

#ifdef __cplusplus
extern "C" {
#endif

/* FIFO -- first in first out structure and macros. */
struct fifo {
	struct fifo *next;
	struct fifo *prev;
};

#define FIFO_INIT(F) \
	do { \
		struct fifo *Xfifo = (struct fifo *)(F); \
		Xfifo->next = Xfifo->prev = Xfifo; \
	} while (0)

#define FIFO_ADD(F, N) \
	do { \
		struct fifo *Xfifo = (struct fifo *)(F); \
		struct fifo *Xnode = (struct fifo *)(N); \
		Xnode->next = Xfifo; \
		Xnode->prev = Xfifo->prev; \
		Xfifo->prev = Xfifo->prev->next = Xnode; \
	} while (0)

#define FIFO_DEL(N) \
	do { \
		struct fifo *Xnode = (struct fifo *)(N); \
		Xnode->prev->next = Xnode->next; \
		Xnode->next->prev = Xnode->prev; \
	} while (0)

#define FIFO_HEAD(F) \
	((((struct fifo *)(F))->next == (struct fifo *)(F)) ? NULL : (F)->next)

#define FIFO_EMPTY(F) (((struct fifo *)(F))->next == (struct fifo *)(F))

#define FIFO_TOP(F) (FIFO_EMPTY(F) ? NULL : ((struct fifo *)(F))->next)

#ifdef __cplusplus
}
#endif

#endif /* __LIB_FIFO_H__ */
@ -80,6 +80,9 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
#define atomic_compare_exchange_weak_explicit(atom, expect, desire, mem1, \
		mem2) \
	__atomic_compare_exchange_n(atom, expect, desire, 1, mem1, mem2)
#define atomic_compare_exchange_strong_explicit(atom, expect, desire, mem1, \
		mem2) \
	__atomic_compare_exchange_n(atom, expect, desire, 0, mem1, mem2)

/* gcc 4.1 and newer,
 * clang 3.3 (possibly older)
@ -152,7 +155,7 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
		rval; \
	})

#define atomic_compare_exchange_weak_explicit(atom, expect, desire, mem1, \
#define atomic_compare_exchange_strong_explicit(atom, expect, desire, mem1, \
		mem2) \
	({ \
		typeof(atom) _atom = (atom); \
@ -166,6 +169,8 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
		*_expect = rval; \
		ret; \
	})
#define atomic_compare_exchange_weak_explicit \
	atomic_compare_exchange_strong_explicit

#define atomic_fetch_and_explicit(ptr, val, mem) \
	({ \

@ -26,8 +26,6 @@
extern "C" {
#endif

#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))

#if defined(HAVE_MALLOC_SIZE) && !defined(HAVE_MALLOC_USABLE_SIZE)
#define malloc_usable_size(x) malloc_size(x)
#define HAVE_MALLOC_USABLE_SIZE

@ -917,7 +917,7 @@ void zprivs_init(struct zebra_privs_t *zprivs)
				zprivs->user, zprivs->vty_group);
			exit(1);
		}
		if (i >= ngroups && ngroups < (int)ZEBRA_NUM_OF(groups)) {
		if (i >= ngroups && ngroups < (int)array_size(groups)) {
			groups[i] = zprivs_state.vtygrp;
		}
	}

52	lib/qobj.c
@ -27,20 +27,26 @@
#include "qobj.h"
#include "jhash.h"

static uint32_t qobj_hash(const struct qobj_node *node)
{
	return (uint32_t)node->nid;
}

static int qobj_cmp(const struct qobj_node *na, const struct qobj_node *nb)
{
	if (na->nid < nb->nid)
		return -1;
	if (na->nid > nb->nid)
		return 1;
	return 0;
}

DECLARE_HASH(qobj_nodes, struct qobj_node, nodehash,
	qobj_cmp, qobj_hash)

static pthread_rwlock_t nodes_lock;
static struct hash *nodes = NULL;
static struct qobj_nodes_head nodes = { };

static unsigned int qobj_key(void *data)
{
	struct qobj_node *node = data;
	return (unsigned int)node->nid;
}

static bool qobj_cmp(const void *a, const void *b)
{
	const struct qobj_node *na = a, *nb = b;
	return na->nid == nb->nid;
}

void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type)
{
@ -49,15 +55,15 @@ void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type)
	do {
		node->nid = (uint64_t)random();
		node->nid ^= (uint64_t)random() << 32;
	} while (!node->nid
		 || hash_get(nodes, node, hash_alloc_intern) != node);
	} while (!node->nid || qobj_nodes_find(&nodes, node));
	qobj_nodes_add(&nodes, node);
	pthread_rwlock_unlock(&nodes_lock);
}

void qobj_unreg(struct qobj_node *node)
{
	pthread_rwlock_wrlock(&nodes_lock);
	hash_release(nodes, node);
	qobj_nodes_del(&nodes, node);
	pthread_rwlock_unlock(&nodes_lock);
}

@ -65,7 +71,7 @@ struct qobj_node *qobj_get(uint64_t id)
{
	struct qobj_node dummy = {.nid = id}, *rv;
	pthread_rwlock_rdlock(&nodes_lock);
	rv = hash_lookup(nodes, &dummy);
	rv = qobj_nodes_find(&nodes, &dummy);
	pthread_rwlock_unlock(&nodes_lock);
	return rv;
}
@ -77,7 +83,7 @@ void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type)
	void *rv;

	pthread_rwlock_rdlock(&nodes_lock);
	node = hash_lookup(nodes, &dummy);
	node = qobj_nodes_find(&nodes, &dummy);

	/* note: we explicitly hold the lock until after we have checked the
	 * type.
@ -96,16 +102,14 @@ void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type)

void qobj_init(void)
{
	if (!nodes) {
		pthread_rwlock_init(&nodes_lock, NULL);
		nodes = hash_create_size(16, qobj_key, qobj_cmp, "QOBJ Hash");
	}
	pthread_rwlock_init(&nodes_lock, NULL);
	qobj_nodes_init(&nodes);
}

void qobj_finish(void)
{
	hash_clean(nodes, NULL);
	hash_free(nodes);
	nodes = NULL;
	struct qobj_node *node;
	while ((node = qobj_nodes_pop(&nodes)))
		qobj_nodes_del(&nodes, node);
	pthread_rwlock_destroy(&nodes_lock);
}

@ -21,6 +21,8 @@
#include <stdlib.h>
#include <stddef.h>

#include "typesafe.h"

#ifdef __cplusplus
extern "C" {
#endif
@ -69,6 +71,8 @@ struct qobj_nodetype_capnp {
};
#endif

#include "typesafe.h"

/* each different kind of object will have a global variable of this type,
 * which can be used by various other pieces to store type-related bits.
 * type equality can be tested as pointer equality. (cf. QOBJ_GET_TYPESAFE)
@ -79,9 +83,12 @@ struct qobj_nodetype {
	RESERVED_SPACE_STRUCT(qobj_nodetype_capnp, capnp, 256)
};

PREDECL_HASH(qobj_nodes)

/* anchor to be embedded somewhere in the object's struct */
struct qobj_node {
	uint64_t nid;
	struct qobj_nodes_item nodehash;
	struct qobj_nodetype *type;
};

167	lib/seqlock.c	Normal file
@ -0,0 +1,167 @@
/*
 * "Sequence" lock primitive
 *
 * Copyright (C) 2015  David Lamparter <equinox@diac24.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <unistd.h>
#include <limits.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/time.h>
#include <pthread.h>
#include <assert.h>

#include "seqlock.h"

#ifdef HAVE_SYNC_LINUX_FUTEX
/* Linux-specific - sys_futex() */
#include <sys/syscall.h>
#include <linux/futex.h>

static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
		void *addr2, int val3)
{
	return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}

#define wait_once(sqlo, val) \
	sys_futex((int *)&sqlo->pos, FUTEX_WAIT, (int)val, NULL, NULL, 0)
#define wait_poke(sqlo) \
	sys_futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)

#elif defined(HAVE_SYNC_OPENBSD_FUTEX)
/* OpenBSD variant of the above.  untested, not upstream in OpenBSD. */
#include <sys/syscall.h>
#include <sys/futex.h>

#define wait_once(sqlo, val) \
	futex((int *)&sqlo->pos, FUTEX_WAIT, (int)val, NULL, NULL, 0)
#define wait_poke(sqlo) \
	futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)

#elif defined(HAVE_SYNC_UMTX_OP)
/* FreeBSD-specific: umtx_op() */
#include <sys/umtx.h>

#define wait_once(sqlo, val) \
	_umtx_op((void *)&sqlo->pos, UMTX_OP_WAIT_UINT, val, NULL, NULL)
#define wait_poke(sqlo) \
	_umtx_op((void *)&sqlo->pos, UMTX_OP_WAKE, INT_MAX, NULL, NULL)

#else
/* generic version.  used on *BSD, Solaris and OSX.
 */

#define wait_init(sqlo) do { \
		pthread_mutex_init(&sqlo->lock, NULL); \
		pthread_cond_init(&sqlo->wake, NULL); \
	} while (0)
#define wait_prep(sqlo) pthread_mutex_lock(&sqlo->lock)
#define wait_once(sqlo, val) pthread_cond_wait(&sqlo->wake, &sqlo->lock)
#define wait_done(sqlo) pthread_mutex_unlock(&sqlo->lock)
#define wait_poke(sqlo) do { \
		pthread_mutex_lock(&sqlo->lock); \
		pthread_cond_broadcast(&sqlo->wake); \
		pthread_mutex_unlock(&sqlo->lock); \
	} while (0)

#endif

#ifndef wait_init
#define wait_init(sqlo) /**/
#define wait_prep(sqlo) /**/
#define wait_done(sqlo) /**/
#endif /* wait_init */


void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val)
{
	seqlock_val_t cur, cal;

	seqlock_assert_valid(val);

	wait_prep(sqlo);
	while (1) {
		cur = atomic_load_explicit(&sqlo->pos, memory_order_acquire);
		if (!(cur & 1))
			break;
		cal = cur - val - 1;
		assert(cal < 0x40000000 || cal > 0xc0000000);
		if (cal < 0x80000000)
			break;

		wait_once(sqlo, cur);
	}
	wait_done(sqlo);
}

bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val)
{
	seqlock_val_t cur;

	seqlock_assert_valid(val);

	cur = atomic_load_explicit(&sqlo->pos, memory_order_acquire);
	if (!(cur & 1))
		return 1;
	cur -= val;
	assert(cur < 0x40000000 || cur > 0xc0000000);
	return cur < 0x80000000;
}

void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val)
{
	seqlock_assert_valid(val);

	atomic_store_explicit(&sqlo->pos, val, memory_order_release);
	wait_poke(sqlo);
}

void seqlock_release(struct seqlock *sqlo)
{
	atomic_store_explicit(&sqlo->pos, 0, memory_order_release);
	wait_poke(sqlo);
}

void seqlock_init(struct seqlock *sqlo)
{
	sqlo->pos = 0;
	wait_init(sqlo);
}


seqlock_val_t seqlock_cur(struct seqlock *sqlo)
{
	return atomic_load_explicit(&sqlo->pos, memory_order_acquire);
}

seqlock_val_t seqlock_bump(struct seqlock *sqlo)
{
	seqlock_val_t val;

	val = atomic_fetch_add_explicit(&sqlo->pos, 2, memory_order_release);
	wait_poke(sqlo);
	return val;
}
106	lib/seqlock.h	Normal file
@ -0,0 +1,106 @@
/*
 * "Sequence" lock primitive
 *
 * Copyright (C) 2015  David Lamparter <equinox@diac24.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301 USA
 */

#ifndef _SEQLOCK_H
#define _SEQLOCK_H

#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>
#include "frratomic.h"

/*
 * this locking primitive is intended to use in a 1:N setup.
 *
 * - one "counter" seqlock issuing increasing numbers
 * - multiple seqlock users hold references on these numbers
 *
 * this is intended for implementing RCU reference-holding.  There is one
 * global counter, with threads locking a seqlock whenever they take a
 * reference.  A seqlock can also be idle/unlocked.
 *
 * The "counter" seqlock will always stay locked;  the RCU cleanup thread
 * continuously counts it up, waiting for threads to release or progress to a
 * sequence number further ahead.  If all threads are > N, references dropped
 * in N can be free'd.
 *
 * generally, the lock function is:
 *
 *   Thread-A                  Thread-B
 *
 *   seqlock_acquire(a)
 *    | running              seqlock_wait(b)      -- a <= b
 *   seqlock_release()        |  blocked
 *   OR: seqlock_acquire(a')  |                   -- a' > b
 *                           (resumes)
 */

/* use sequentially increasing "ticket numbers".  lowest bit will always
 * be 1 to have a 'cleared' indication (i.e., counts 1,3,5,7,etc. )
 */
typedef _Atomic uint32_t	seqlock_ctr_t;
typedef uint32_t		seqlock_val_t;
#define seqlock_assert_valid(val) assert(val & 1)


struct seqlock {
	/* always used */
	seqlock_ctr_t pos;
	/* used when futexes not available: (i.e. non-linux) */
	pthread_mutex_t lock;
	pthread_cond_t wake;
};


/* sqlo = 0 - init state: not held */
extern void seqlock_init(struct seqlock *sqlo);


/* while (sqlo <= val) - wait until seqlock->pos > val, or seqlock unheld */
extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);
extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);

static inline bool seqlock_held(struct seqlock *sqlo)
{
	return !!atomic_load_explicit(&sqlo->pos, memory_order_relaxed);
}

/* sqlo - get seqlock position -- for the "counter" seqlock */
extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);
/* sqlo++ - note: like x++, returns previous value, before bumping */
extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);


/* sqlo = val - can be used on held seqlock. */
extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);
/* sqlo = ref - standard pattern: acquire relative to other seqlock */
static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
{
	seqlock_acquire_val(sqlo, seqlock_cur(ref));
}

/* sqlo = 0 - set seqlock position to 0, marking as non-held */
extern void seqlock_release(struct seqlock *sqlo);
/* release should normally be followed by a bump on the "counter", if
 * anything other than reading RCU items was done
 */

#endif /* _SEQLOCK_H */
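To make the 1:N pattern from the header comment concrete, here is a hedged sketch (not part of the commit) of one plausible wiring: a single "counter" seqlock owned by the cleanup side, acquired once with ticket 1 and bumped by 2 per cycle, and a per-reader seqlock that tracks the ticket the reader is working under. All calls used below are declared in lib/seqlock.h above; the surrounding RCU bookkeeping is assumed.

#include "seqlock.h"

static struct seqlock rcu_counter;	/* the one global "counter" */
static struct seqlock my_state;		/* one per reader thread */

static void global_setup(void)
{
	seqlock_init(&rcu_counter);
	seqlock_acquire_val(&rcu_counter, 1);	/* counter stays held, odd */
}

static void reader_enter(void)
{
	seqlock_init(&my_state);
	/* park on the current ticket while holding references */
	seqlock_acquire(&my_state, &rcu_counter);
}

static void reader_quiesce(void)
{
	seqlock_release(&my_state);	/* no references held anymore */
}

static void cleanup_cycle(void)
{
	/* like x++: n is the ticket under which items were unlinked */
	seqlock_val_t n = seqlock_bump(&rcu_counter);

	/* returns once my_state is unheld or has advanced past n;
	 * a real implementation would wait on every reader's seqlock */
	seqlock_wait(&my_state, n);
	/* items dropped at ticket n may now be freed */
}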
@ -472,7 +472,7 @@ unsigned int sockunion_hash(const union sockunion *su)
		return jhash_1word(su->sin.sin_addr.s_addr, 0);
	case AF_INET6:
		return jhash2(su->sin6.sin6_addr.s6_addr32,
			      ZEBRA_NUM_OF(su->sin6.sin6_addr.s6_addr32), 0);
			      array_size(su->sin6.sin6_addr.s6_addr32), 0);
	}
	return 0;
}

@ -7,6 +7,7 @@ lib_libfrr_la_LIBADD = $(LIBCAP) $(UNWIND_LIBS) $(LIBYANG_LIBS)

lib_libfrr_la_SOURCES = \
	lib/agg_table.c \
	lib/atomlist.c \
	lib/bfd.c \
	lib/buffer.c \
	lib/checksum.c \
@ -65,6 +66,7 @@ lib_libfrr_la_SOURCES = \
	lib/ringbuf.c \
	lib/routemap.c \
	lib/sbuf.c \
	lib/seqlock.c \
	lib/sha256.c \
	lib/sigevent.c \
	lib/skiplist.c \
@ -79,6 +81,8 @@ lib_libfrr_la_SOURCES = \
	lib/table.c \
	lib/termtable.c \
	lib/thread.c \
	lib/typerb.c \
	lib/typesafe.c \
	lib/vector.c \
	lib/vrf.c \
	lib/vty.c \
@ -130,6 +134,7 @@ lib/northbound_cli.lo: lib/northbound_cli_clippy.c

pkginclude_HEADERS += \
	lib/agg_table.h \
	lib/atomlist.h \
	lib/bfd.h \
	lib/bitfield.h \
	lib/buffer.h \
@ -144,7 +149,6 @@ pkginclude_HEADERS += \
	lib/debug.h \
	lib/distribute.h \
	lib/ferr.h \
	lib/fifo.h \
	lib/filter.h \
	lib/freebsd-queue.h \
	lib/frr_pthread.h \
@ -193,6 +197,7 @@ pkginclude_HEADERS += \
	lib/ringbuf.h \
	lib/routemap.h \
	lib/sbuf.h \
	lib/seqlock.h \
	lib/sha256.h \
	lib/sigevent.h \
	lib/skiplist.h \
@ -206,6 +211,8 @@ pkginclude_HEADERS += \
	lib/table.h \
	lib/termtable.h \
	lib/thread.h \
	lib/typerb.h \
	lib/typesafe.h \
	lib/vector.h \
	lib/vlan.h \
	lib/vrf.h \

131	lib/thread.c
@ -40,6 +40,8 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info")
DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")

DECLARE_LIST(thread_list, struct thread, threaditem)

#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_time.h>
@ -435,6 +437,9 @@ struct thread_master *thread_master_create(const char *name)
		(bool (*)(const void *, const void *))cpu_record_hash_cmp,
		"Thread Hash");

	thread_list_init(&rv->event);
	thread_list_init(&rv->ready);
	thread_list_init(&rv->unuse);

	/* Initialize the timer queues */
	rv->timer = pqueue_create();
@ -487,50 +492,6 @@ void thread_master_set_name(struct thread_master *master, const char *name)
	pthread_mutex_unlock(&master->mtx);
}

/* Add a new thread to the list. */
static void thread_list_add(struct thread_list *list, struct thread *thread)
{
	thread->next = NULL;
	thread->prev = list->tail;
	if (list->tail)
		list->tail->next = thread;
	else
		list->head = thread;
	list->tail = thread;
	list->count++;
}

/* Delete a thread from the list. */
static struct thread *thread_list_delete(struct thread_list *list,
					 struct thread *thread)
{
	if (thread->next)
		thread->next->prev = thread->prev;
	else
		list->tail = thread->prev;
	if (thread->prev)
		thread->prev->next = thread->next;
	else
		list->head = thread->next;
	thread->next = thread->prev = NULL;
	list->count--;
	return thread;
}

/* Thread list is empty or not. */
static int thread_empty(struct thread_list *list)
{
	return list->head ? 0 : 1;
}

/* Delete top of the list and return it. */
static struct thread *thread_trim_head(struct thread_list *list)
{
	if (!thread_empty(list))
		return thread_list_delete(list, list->head);
	return NULL;
}

#define THREAD_UNUSED_DEPTH 10

/* Move thread to unuse list. */
@ -539,8 +500,6 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
	pthread_mutex_t mtxc = thread->mtx;

	assert(m != NULL && thread != NULL);
	assert(thread->next == NULL);
	assert(thread->prev == NULL);

	thread->hist->total_active--;
	memset(thread, 0, sizeof(struct thread));
@ -549,8 +508,8 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
	/* Restore the thread mutex context. */
	thread->mtx = mtxc;

	if (m->unuse.count < THREAD_UNUSED_DEPTH) {
		thread_list_add(&m->unuse, thread);
	if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
		thread_list_add_tail(&m->unuse, thread);
		return;
	}

@ -558,16 +517,13 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
}

/* Free all unused thread. */
static void thread_list_free(struct thread_master *m, struct thread_list *list)
static void thread_list_free(struct thread_master *m,
		struct thread_list_head *list)
{
	struct thread *t;
	struct thread *next;

	for (t = list->head; t; t = next) {
		next = t->next;
	while ((t = thread_list_pop(list)))
		thread_free(m, t);
		list->count--;
	}
}

static void thread_array_free(struct thread_master *m,
@ -609,9 +565,8 @@ void thread_master_free_unused(struct thread_master *m)
	pthread_mutex_lock(&m->mtx);
	{
		struct thread *t;
		while ((t = thread_trim_head(&m->unuse)) != NULL) {
		while ((t = thread_list_pop(&m->unuse)))
			thread_free(m, t);
		}
	}
	pthread_mutex_unlock(&m->mtx);
}
@ -690,7 +645,7 @@ static struct thread *thread_get(struct thread_master *m, uint8_t type,
				 int (*func)(struct thread *), void *arg,
				 debugargdef)
{
	struct thread *thread = thread_trim_head(&m->unuse);
	struct thread *thread = thread_list_pop(&m->unuse);
	struct cpu_thread_history tmp;

	if (!thread) {
@ -971,7 +926,7 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
	pthread_mutex_lock(&thread->mtx);
	{
		thread->u.val = val;
		thread_list_add(&m->event, thread);
		thread_list_add_tail(&m->event, thread);
	}
	pthread_mutex_unlock(&thread->mtx);

@ -1063,7 +1018,7 @@ static void thread_cancel_rw(struct thread_master *master, int fd, short state)
 */
static void do_thread_cancel(struct thread_master *master)
{
	struct thread_list *list = NULL;
	struct thread_list_head *list = NULL;
	struct pqueue *queue = NULL;
	struct thread **thread_array = NULL;
	struct thread *thread;
@ -1078,31 +1033,23 @@ static void do_thread_cancel(struct thread_master *master)
		 * need to check every thread in the ready queue. */
		if (cr->eventobj) {
			struct thread *t;
			thread = master->event.head;

			while (thread) {
				t = thread;
				thread = t->next;

				if (t->arg == cr->eventobj) {
					thread_list_delete(&master->event, t);
					if (t->ref)
						*t->ref = NULL;
					thread_add_unuse(master, t);
				}
			for_each_safe(thread_list, &master->event, t) {
				if (t->arg != cr->eventobj)
					continue;
				thread_list_del(&master->event, t);
				if (t->ref)
					*t->ref = NULL;
				thread_add_unuse(master, t);
			}

			thread = master->ready.head;
			while (thread) {
				t = thread;
				thread = t->next;

				if (t->arg == cr->eventobj) {
					thread_list_delete(&master->ready, t);
					if (t->ref)
						*t->ref = NULL;
					thread_add_unuse(master, t);
				}
			for_each_safe(thread_list, &master->ready, t) {
				if (t->arg != cr->eventobj)
					continue;
				thread_list_del(&master->ready, t);
				if (t->ref)
					*t->ref = NULL;
				thread_add_unuse(master, t);
			}
			continue;
		}
@ -1146,7 +1093,7 @@ static void do_thread_cancel(struct thread_master *master)
			assert(thread == queue->array[thread->index]);
			pqueue_remove_at(thread->index, queue);
		} else if (list) {
			thread_list_delete(list, thread);
			thread_list_del(list, thread);
		} else if (thread_array) {
			thread_array[thread->u.fd] = NULL;
		} else {
@ -1301,7 +1248,7 @@ static int thread_process_io_helper(struct thread_master *m,
		thread_array = m->write;

	thread_array[thread->u.fd] = NULL;
	thread_list_add(&m->ready, thread);
	thread_list_add_tail(&m->ready, thread);
	thread->type = THREAD_READY;
	/* if another pthread scheduled this file descriptor for the event we're
	 * responding to, no problem; we're getting to it now */
@ -1380,24 +1327,21 @@ static unsigned int thread_process_timers(struct pqueue *queue,
			return ready;
		pqueue_dequeue(queue);
		thread->type = THREAD_READY;
		thread_list_add(&thread->master->ready, thread);
		thread_list_add_tail(&thread->master->ready, thread);
		ready++;
	}
	return ready;
}

/* process a list en masse, e.g. for event thread lists */
static unsigned int thread_process(struct thread_list *list)
static unsigned int thread_process(struct thread_list_head *list)
{
	struct thread *thread;
	struct thread *next;
	unsigned int ready = 0;

	for (thread = list->head; thread; thread = next) {
		next = thread->next;
		thread_list_delete(list, thread);
	while ((thread = thread_list_pop(list))) {
		thread->type = THREAD_READY;
		thread_list_add(&thread->master->ready, thread);
		thread_list_add_tail(&thread->master->ready, thread);
		ready++;
	}
	return ready;
@ -1429,7 +1373,7 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
		 * Attempt to flush ready queue before going into poll().
		 * This is performance-critical. Think twice before modifying.
		 */
		if ((thread = thread_trim_head(&m->ready))) {
		if ((thread = thread_list_pop(&m->ready))) {
			fetch = thread_run(m, thread, fetch);
			if (fetch->ref)
				*fetch->ref = NULL;
@ -1462,10 +1406,11 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
		 * In every case except the last, we need to hit poll() at least
		 * once per loop to avoid starvation by events
		 */
		if (m->ready.count == 0)
		if (!thread_list_count(&m->ready))
			tw = thread_timer_wait(m->timer, &tv);

		if (m->ready.count != 0 || (tw && !timercmp(tw, &zerotime, >)))
		if (thread_list_count(&m->ready) ||
				(tw && !timercmp(tw, &zerotime, >)))
			tw = &zerotime;

		if (!tw && m->handler.pfdcount == 0) { /* die */

15	lib/thread.h
@ -26,6 +26,7 @@
#include <poll.h>
#include "monotime.h"
#include "frratomic.h"
#include "typesafe.h"

#ifdef __cplusplus
extern "C" {
@ -39,12 +40,7 @@ struct rusage_t {

#define GETRUSAGE(X) thread_getrusage(X)

/* Linked list of thread. */
struct thread_list {
	struct thread *head;
	struct thread *tail;
	int count;
};
PREDECL_LIST(thread_list)

struct pqueue;

@ -78,9 +74,7 @@ struct thread_master {
	struct thread **read;
	struct thread **write;
	struct pqueue *timer;
	struct thread_list event;
	struct thread_list ready;
	struct thread_list unuse;
	struct thread_list_head event, ready, unuse;
	struct list *cancel_req;
	bool canceled;
	pthread_cond_t cancel_cond;
@ -100,8 +94,7 @@ struct thread_master {
struct thread {
	uint8_t type;		/* thread type */
	uint8_t add_type;	/* thread type */
	struct thread *next;	/* next pointer of the thread */
	struct thread *prev;	/* previous pointer of the thread */
	struct thread_list_item threaditem;
	struct thread **ref;	/* external reference (if given) */
	struct thread_master *master;	/* pointer to the struct thread_master */
	int (*func)(struct thread *);	/* event function */

472	lib/typerb.c	Normal file
@ -0,0 +1,472 @@
/* RB-tree */

/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "typerb.h"

#define RB_BLACK	0
#define RB_RED		1

#define rb_entry	typed_rb_entry
#define rbt_tree	typed_rb_root

#define RBE_LEFT(_rbe)		(_rbe)->rbt_left
#define RBE_RIGHT(_rbe)		(_rbe)->rbt_right
#define RBE_PARENT(_rbe)	(_rbe)->rbt_parent
#define RBE_COLOR(_rbe)		(_rbe)->rbt_color

#define RBH_ROOT(_rbt)		(_rbt)->rbt_root

static inline void rbe_set(struct rb_entry *rbe, struct rb_entry *parent)
{
	RBE_PARENT(rbe) = parent;
	RBE_LEFT(rbe) = RBE_RIGHT(rbe) = NULL;
	RBE_COLOR(rbe) = RB_RED;
}

static inline void rbe_set_blackred(struct rb_entry *black,
		struct rb_entry *red)
{
	RBE_COLOR(black) = RB_BLACK;
	RBE_COLOR(red) = RB_RED;
}

static inline void rbe_rotate_left(struct rbt_tree *rbt, struct rb_entry *rbe)
{
	struct rb_entry *parent;
	struct rb_entry *tmp;

	tmp = RBE_RIGHT(rbe);
	RBE_RIGHT(rbe) = RBE_LEFT(tmp);
	if (RBE_RIGHT(rbe) != NULL)
		RBE_PARENT(RBE_LEFT(tmp)) = rbe;

	parent = RBE_PARENT(rbe);
	RBE_PARENT(tmp) = parent;
	if (parent != NULL) {
		if (rbe == RBE_LEFT(parent))
			RBE_LEFT(parent) = tmp;
		else
			RBE_RIGHT(parent) = tmp;
	} else
		RBH_ROOT(rbt) = tmp;

	RBE_LEFT(tmp) = rbe;
	RBE_PARENT(rbe) = tmp;
}

static inline void rbe_rotate_right(struct rbt_tree *rbt, struct rb_entry *rbe)
{
	struct rb_entry *parent;
	struct rb_entry *tmp;

	tmp = RBE_LEFT(rbe);
	RBE_LEFT(rbe) = RBE_RIGHT(tmp);
	if (RBE_LEFT(rbe) != NULL)
		RBE_PARENT(RBE_RIGHT(tmp)) = rbe;

	parent = RBE_PARENT(rbe);
	RBE_PARENT(tmp) = parent;
	if (parent != NULL) {
		if (rbe == RBE_LEFT(parent))
			RBE_LEFT(parent) = tmp;
		else
			RBE_RIGHT(parent) = tmp;
	} else
		RBH_ROOT(rbt) = tmp;

	RBE_RIGHT(tmp) = rbe;
	RBE_PARENT(rbe) = tmp;
}

static inline void rbe_insert_color(struct rbt_tree *rbt, struct rb_entry *rbe)
{
	struct rb_entry *parent, *gparent, *tmp;

	rbt->count++;

	while ((parent = RBE_PARENT(rbe)) != NULL
	    && RBE_COLOR(parent) == RB_RED) {
		gparent = RBE_PARENT(parent);

		if (parent == RBE_LEFT(gparent)) {
			tmp = RBE_RIGHT(gparent);
			if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
				RBE_COLOR(tmp) = RB_BLACK;
				rbe_set_blackred(parent, gparent);
				rbe = gparent;
				continue;
			}

			if (RBE_RIGHT(parent) == rbe) {
				rbe_rotate_left(rbt, parent);
				tmp = parent;
				parent = rbe;
				rbe = tmp;
			}

			rbe_set_blackred(parent, gparent);
			rbe_rotate_right(rbt, gparent);
		} else {
			tmp = RBE_LEFT(gparent);
			if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
				RBE_COLOR(tmp) = RB_BLACK;
				rbe_set_blackred(parent, gparent);
				rbe = gparent;
				continue;
			}

			if (RBE_LEFT(parent) == rbe) {
				rbe_rotate_right(rbt, parent);
				tmp = parent;
				parent = rbe;
				rbe = tmp;
			}

			rbe_set_blackred(parent, gparent);
			rbe_rotate_left(rbt, gparent);
		}
	}

	RBE_COLOR(RBH_ROOT(rbt)) = RB_BLACK;
}

static inline void rbe_remove_color(struct rbt_tree *rbt,
		struct rb_entry *parent,
		struct rb_entry *rbe)
{
	struct rb_entry *tmp;

	while ((rbe == NULL || RBE_COLOR(rbe) == RB_BLACK)
	    && rbe != RBH_ROOT(rbt) && parent) {
		if (RBE_LEFT(parent) == rbe) {
			tmp = RBE_RIGHT(parent);
			if (RBE_COLOR(tmp) == RB_RED) {
				rbe_set_blackred(tmp, parent);
				rbe_rotate_left(rbt, parent);
				tmp = RBE_RIGHT(parent);
			}
			if ((RBE_LEFT(tmp) == NULL
			     || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK)
			    && (RBE_RIGHT(tmp) == NULL
				|| RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
				RBE_COLOR(tmp) = RB_RED;
				rbe = parent;
				parent = RBE_PARENT(rbe);
			} else {
				if (RBE_RIGHT(tmp) == NULL
				    || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK) {
					struct rb_entry *oleft;

					oleft = RBE_LEFT(tmp);
					if (oleft != NULL)
						RBE_COLOR(oleft) = RB_BLACK;

					RBE_COLOR(tmp) = RB_RED;
					rbe_rotate_right(rbt, tmp);
					tmp = RBE_RIGHT(parent);
				}

				RBE_COLOR(tmp) = RBE_COLOR(parent);
				RBE_COLOR(parent) = RB_BLACK;
				if (RBE_RIGHT(tmp))
					RBE_COLOR(RBE_RIGHT(tmp)) = RB_BLACK;

				rbe_rotate_left(rbt, parent);
				rbe = RBH_ROOT(rbt);
				break;
			}
		} else {
			tmp = RBE_LEFT(parent);
			if (RBE_COLOR(tmp) == RB_RED) {
				rbe_set_blackred(tmp, parent);
				rbe_rotate_right(rbt, parent);
				tmp = RBE_LEFT(parent);
			}

			if ((RBE_LEFT(tmp) == NULL
			     || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK)
			    && (RBE_RIGHT(tmp) == NULL
				|| RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
				RBE_COLOR(tmp) = RB_RED;
				rbe = parent;
				parent = RBE_PARENT(rbe);
			} else {
				if (RBE_LEFT(tmp) == NULL
				    || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK) {
					struct rb_entry *oright;

					oright = RBE_RIGHT(tmp);
					if (oright != NULL)
						RBE_COLOR(oright) = RB_BLACK;

					RBE_COLOR(tmp) = RB_RED;
					rbe_rotate_left(rbt, tmp);
					tmp = RBE_LEFT(parent);
				}

				RBE_COLOR(tmp) = RBE_COLOR(parent);
				RBE_COLOR(parent) = RB_BLACK;
				if (RBE_LEFT(tmp) != NULL)
					RBE_COLOR(RBE_LEFT(tmp)) = RB_BLACK;

				rbe_rotate_right(rbt, parent);
				rbe = RBH_ROOT(rbt);
				break;
			}
		}
	}

	if (rbe != NULL)
		RBE_COLOR(rbe) = RB_BLACK;
}

static inline struct rb_entry *
rbe_remove(struct rbt_tree *rbt, struct rb_entry *rbe)
{
	struct rb_entry *child, *parent, *old = rbe;
	unsigned int color;

	if (RBE_LEFT(rbe) == NULL)
		child = RBE_RIGHT(rbe);
	else if (RBE_RIGHT(rbe) == NULL)
		child = RBE_LEFT(rbe);
	else {
		struct rb_entry *tmp;

		rbe = RBE_RIGHT(rbe);
		while ((tmp = RBE_LEFT(rbe)) != NULL)
			rbe = tmp;

		child = RBE_RIGHT(rbe);
		parent = RBE_PARENT(rbe);
		color = RBE_COLOR(rbe);
		if (child != NULL)
			RBE_PARENT(child) = parent;
		if (parent != NULL) {
			if (RBE_LEFT(parent) == rbe)
				RBE_LEFT(parent) = child;
			else
				RBE_RIGHT(parent) = child;
		} else
			RBH_ROOT(rbt) = child;
		if (RBE_PARENT(rbe) == old)
			parent = rbe;
		*rbe = *old;

		tmp = RBE_PARENT(old);
		if (tmp != NULL) {
			if (RBE_LEFT(tmp) == old)
				RBE_LEFT(tmp) = rbe;
			else
				RBE_RIGHT(tmp) = rbe;
		} else
			RBH_ROOT(rbt) = rbe;

		RBE_PARENT(RBE_LEFT(old)) = rbe;
		if (RBE_RIGHT(old))
			RBE_PARENT(RBE_RIGHT(old)) = rbe;

		goto color;
	}

	parent = RBE_PARENT(rbe);
	color = RBE_COLOR(rbe);

	if (child != NULL)
		RBE_PARENT(child) = parent;
	if (parent != NULL) {
		if (RBE_LEFT(parent) == rbe)
			RBE_LEFT(parent) = child;
		else
			RBE_RIGHT(parent) = child;
	} else
		RBH_ROOT(rbt) = child;
color:
	if (color == RB_BLACK)
		rbe_remove_color(rbt, parent, child);

	rbt->count--;
	return (old);
}

void typed_rb_remove(struct rbt_tree *rbt, struct rb_entry *rbe)
{
	rbe_remove(rbt, rbe);
}

struct typed_rb_entry *typed_rb_insert(struct rbt_tree *rbt,
		struct rb_entry *rbe, int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b))
{
	struct rb_entry *tmp;
	struct rb_entry *parent = NULL;
	int comp = 0;

	tmp = RBH_ROOT(rbt);
	while (tmp != NULL) {
		parent = tmp;

		comp = cmpfn(rbe, tmp);
		if (comp < 0)
			tmp = RBE_LEFT(tmp);
		else if (comp > 0)
			tmp = RBE_RIGHT(tmp);
		else
			return tmp;
	}

	rbe_set(rbe, parent);

	if (parent != NULL) {
		if (comp < 0)
			RBE_LEFT(parent) = rbe;
		else
			RBE_RIGHT(parent) = rbe;
	} else
		RBH_ROOT(rbt) = rbe;

	rbe_insert_color(rbt, rbe);

	return NULL;
}

/* Finds the node with the same key as elm */
struct rb_entry *typed_rb_find(struct rbt_tree *rbt, const struct rb_entry *key,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b))
{
	struct rb_entry *tmp = RBH_ROOT(rbt);
	int comp;

	while (tmp != NULL) {
		comp = cmpfn(key, tmp);
		if (comp < 0)
			tmp = RBE_LEFT(tmp);
		else if (comp > 0)
			tmp = RBE_RIGHT(tmp);
		else
			return tmp;
	}

	return (NULL);
}

struct rb_entry *typed_rb_find_gteq(struct rbt_tree *rbt,
		const struct rb_entry *key,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b))
{
	struct rb_entry *tmp = RBH_ROOT(rbt), *best = NULL;
	int comp;

	while (tmp != NULL) {
		comp = cmpfn(key, tmp);
		if (comp < 0) {
			best = tmp;
			tmp = RBE_LEFT(tmp);
		} else if (comp > 0)
			tmp = RBE_RIGHT(tmp);
		else
			return tmp;
	}

	return best;
}

struct rb_entry *typed_rb_find_lt(struct rbt_tree *rbt,
		const struct rb_entry *key,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b))
{
	struct rb_entry *tmp = RBH_ROOT(rbt), *best = NULL;
	int comp;

	while (tmp != NULL) {
		comp = cmpfn(key, tmp);
		if (comp <= 0)
			tmp = RBE_LEFT(tmp);
		else {
			best = tmp;
			tmp = RBE_RIGHT(tmp);
		}
	}

	return best;
}

struct rb_entry *typed_rb_next(struct rb_entry *rbe)
{
	if (RBE_RIGHT(rbe) != NULL) {
		rbe = RBE_RIGHT(rbe);
		while (RBE_LEFT(rbe) != NULL)
			rbe = RBE_LEFT(rbe);
	} else {
		if (RBE_PARENT(rbe) && (rbe == RBE_LEFT(RBE_PARENT(rbe))))
			rbe = RBE_PARENT(rbe);
		else {
			while (RBE_PARENT(rbe)
			    && (rbe == RBE_RIGHT(RBE_PARENT(rbe))))
				rbe = RBE_PARENT(rbe);
			rbe = RBE_PARENT(rbe);
		}
	}

	return rbe;
}

struct rb_entry *typed_rb_min(struct rbt_tree *rbt)
{
	struct rb_entry *rbe = RBH_ROOT(rbt);
	struct rb_entry *parent = NULL;

	while (rbe != NULL) {
		parent = rbe;
		rbe = RBE_LEFT(rbe);
	}

	return parent;
}
182	lib/typerb.h	Normal file
@ -0,0 +1,182 @@
/*
 * The following Red-Black tree implementation is based off code with
 * original copyright:
 *
 * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_TYPERB_H
#define _FRR_TYPERB_H

#include "typesafe.h"

struct typed_rb_entry {
	struct typed_rb_entry *rbt_parent;
	struct typed_rb_entry *rbt_left;
	struct typed_rb_entry *rbt_right;
	unsigned int rbt_color;
};

struct typed_rb_root {
	struct typed_rb_entry *rbt_root;
	size_t count;
};

struct typed_rb_entry *typed_rb_insert(struct typed_rb_root *,
		struct typed_rb_entry *rbe,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b));
void typed_rb_remove(struct typed_rb_root *, struct typed_rb_entry *rbe);
struct typed_rb_entry *typed_rb_find(struct typed_rb_root *,
		const struct typed_rb_entry *rbe,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b));
struct typed_rb_entry *typed_rb_find_gteq(struct typed_rb_root *,
		const struct typed_rb_entry *rbe,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b));
struct typed_rb_entry *typed_rb_find_lt(struct typed_rb_root *,
		const struct typed_rb_entry *rbe,
		int (*cmpfn)(
			const struct typed_rb_entry *a,
			const struct typed_rb_entry *b));
struct typed_rb_entry *typed_rb_min(struct typed_rb_root *);
struct typed_rb_entry *typed_rb_next(struct typed_rb_entry *);

#define _PREDECL_RBTREE(prefix) \
struct prefix ## _head { struct typed_rb_root rr; }; \
struct prefix ## _item { struct typed_rb_entry re; };

#define INIT_RBTREE_UNIQ(var) { }
#define INIT_RBTREE_NONUNIQ(var) { }

#define _DECLARE_RBTREE(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_insert(&h->rr, &item->field.re, cmpfn_uq); \
	return container_of_null(re, type, field.re); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_find_gteq(&h->rr, &item->field.re, cmpfn_nuq); \
	return container_of_null(re, type, field.re); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_find_lt(&h->rr, &item->field.re, cmpfn_nuq); \
	return container_of_null(re, type, field.re); \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	typed_rb_remove(&h->rr, &item->field.re); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_min(&h->rr); \
	if (!re) \
		return NULL; \
	typed_rb_remove(&h->rr, re); \
	return container_of(re, type, field.re); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_min(&h->rr); \
	return container_of_null(re, type, field.re); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_next(&item->field.re); \
	return container_of_null(re, type, field.re); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct typed_rb_entry *re; \
	re = item ? typed_rb_next(&item->field.re) : NULL; \
	return container_of_null(re, type, field.re); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->rr.count; \
} \
/* ... */

#define PREDECL_RBTREE_UNIQ(prefix) \
	_PREDECL_RBTREE(prefix)
#define DECLARE_RBTREE_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct typed_rb_entry *a, \
		const struct typed_rb_entry *b) \
{ \
	return cmpfn(container_of(a, type, field.re), \
			container_of(b, type, field.re)); \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct typed_rb_entry *re; \
	re = typed_rb_find(&h->rr, &item->field.re, &prefix ## __cmp); \
	return container_of_null(re, type, field.re); \
} \
\
_DECLARE_RBTREE(prefix, type, field, prefix ## __cmp, prefix ## __cmp) \
/* ... */

#define PREDECL_RBTREE_NONUNIQ(prefix) \
	_PREDECL_RBTREE(prefix)
#define DECLARE_RBTREE_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct typed_rb_entry *a, \
		const struct typed_rb_entry *b) \
{ \
	return cmpfn(container_of(a, type, field.re), \
			container_of(b, type, field.re)); \
} \
macro_inline int prefix ## __cmp_uq(const struct typed_rb_entry *a, \
		const struct typed_rb_entry *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.re), \
			container_of(b, type, field.re)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_RBTREE(prefix, type, field, prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */

#endif /* _FRR_TYPERB_H */
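To see the generated API in use, here is a minimal sketch (not part of the commit) built only from the macros and functions declared in lib/typerb.h above; `itemtree` and `struct item` are hypothetical names. The node is embedded in the element, so no per-node allocation happens, and on a UNIQ tree `_add` returns NULL on success or the already-present duplicate:

#include <string.h>
#include "typerb.h"

PREDECL_RBTREE_UNIQ(itemtree)

struct item {
	int key;
	struct itemtree_item ref;	/* embedded RB node */
};

static int item_cmp(const struct item *a, const struct item *b)
{
	return a->key - b->key;
}

DECLARE_RBTREE_UNIQ(itemtree, struct item, ref, item_cmp)

static void example(void)
{
	struct itemtree_head head;
	struct item a = { .key = 1 }, b = { .key = 2 }, dummy, *found, *i;

	itemtree_init(&head);
	itemtree_add(&head, &a);	/* NULL: inserted */
	itemtree_add(&head, &b);

	dummy.key = 2;
	found = itemtree_find(&head, &dummy);	/* == &b */
	(void)found;

	/* sorted walk, smallest key first */
	for (i = itemtree_first(&head); i; i = itemtree_next(&head, i))
		;

	itemtree_del(&head, &a);
	itemtree_fini(&head);
}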
377
lib/typesafe.c
Normal file
377
lib/typesafe.c
Normal file
@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright (c) 2019 David Lamparter, for NetDEF, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "typesafe.h"
|
||||
#include "memory.h"
|
||||
|
||||
DEFINE_MTYPE_STATIC(LIB, TYPEDHASH_BUCKET, "Typed-hash bucket")
|
||||
DEFINE_MTYPE_STATIC(LIB, SKIPLIST_OFLOW, "Skiplist overflow")
|
||||
|
||||
#if 0
|
||||
static void hash_consistency_check(struct thash_head *head)
|
||||
{
|
||||
uint32_t i;
|
||||
struct thash_item *item, *prev;
|
||||
|
||||
for (i = 0; i < HASH_SIZE(*head); i++) {
|
||||
item = head->entries[i];
|
||||
prev = NULL;
|
||||
while (item) {
|
||||
assert(HASH_KEY(*head, item->hashval) == i);
|
||||
assert(!prev || item->hashval >= prev->hashval);
|
||||
prev = item;
|
||||
item = item->next;
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
#define hash_consistency_check(x)
|
||||
#endif
|
||||
|
||||
void typesafe_hash_grow(struct thash_head *head)
{
	uint32_t newsize = head->count, i, j;
	uint8_t newshift, delta;

	hash_consistency_check(head);

	newsize |= newsize >> 1;
	newsize |= newsize >> 2;
	newsize |= newsize >> 4;
	newsize |= newsize >> 8;
	newsize |= newsize >> 16;
	newsize++;
	newshift = __builtin_ctz(newsize) + 1;

	if (head->maxshift && newshift > head->maxshift)
		newshift = head->maxshift;
	if (newshift == head->tabshift)
		return;
	newsize = _HASH_SIZE(newshift);

	head->entries = XREALLOC(MTYPE_TYPEDHASH_BUCKET, head->entries,
				 sizeof(head->entries[0]) * newsize);
	memset(head->entries + HASH_SIZE(*head), 0,
	       sizeof(head->entries[0]) * (newsize - HASH_SIZE(*head)));

	delta = newshift - head->tabshift;

	i = HASH_SIZE(*head);
	if (i == 0)
		goto out;
	do {
		struct thash_item **apos, *item;

		i--;
		apos = &head->entries[i];

		for (j = 0; j < (1U << delta); j++) {
			item = *apos;
			*apos = NULL;

			head->entries[(i << delta) + j] = item;
			apos = &head->entries[(i << delta) + j];

			while ((item = *apos)) {
				uint32_t midbits;
				midbits = _HASH_KEY(newshift, item->hashval);
				midbits &= (1 << delta) - 1;
				if (midbits > j)
					break;
				apos = &item->next;
			}
		}
	} while (i > 0);

out:
	head->tabshift = newshift;
	hash_consistency_check(head);
}

void typesafe_hash_shrink(struct thash_head *head)
{
	uint32_t newsize = head->count, i, j;
	uint8_t newshift, delta;

	hash_consistency_check(head);

	if (!head->count) {
		XFREE(MTYPE_TYPEDHASH_BUCKET, head->entries);
		head->tabshift = 0;
		return;
	}

	newsize |= newsize >> 1;
	newsize |= newsize >> 2;
	newsize |= newsize >> 4;
	newsize |= newsize >> 8;
	newsize |= newsize >> 16;
	newsize++;
	newshift = __builtin_ctz(newsize) + 1;

	if (head->minshift && newshift < head->minshift)
		newshift = head->minshift;
	if (newshift == head->tabshift)
		return;
	newsize = _HASH_SIZE(newshift);

	delta = head->tabshift - newshift;

	for (i = 0; i < newsize; i++) {
		struct thash_item **apos = &head->entries[i];

		for (j = 0; j < (1U << delta); j++) {
			*apos = head->entries[(i << delta) + j];
			while (*apos)
				apos = &(*apos)->next;
		}
	}
	head->entries = XREALLOC(MTYPE_TYPEDHASH_BUCKET, head->entries,
				 sizeof(head->entries[0]) * newsize);
	head->tabshift = newshift;

	hash_consistency_check(head);
}

/* skiplist */

static inline struct sskip_item *sl_level_get(struct sskip_item *item,
		size_t level)
{
	if (level < SKIPLIST_OVERFLOW)
		return item->next[level];
	if (level == SKIPLIST_OVERFLOW && !((uintptr_t)item->next[level] & 1))
		return item->next[level];

	uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
	ptrval &= UINTPTR_MAX - 3;
	struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
	return oflow->next[level - SKIPLIST_OVERFLOW];
}

static inline void sl_level_set(struct sskip_item *item, size_t level,
		struct sskip_item *value)
{
	if (level < SKIPLIST_OVERFLOW)
		item->next[level] = value;
	else if (level == SKIPLIST_OVERFLOW && !((uintptr_t)item->next[level] & 1))
		item->next[level] = value;
	else {
		uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
		ptrval &= UINTPTR_MAX - 3;
		struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
		oflow->next[level - SKIPLIST_OVERFLOW] = value;
	}
}

struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b))
{
	size_t level = SKIPLIST_MAXDEPTH, newlevel, auxlevel;
	struct sskip_item *prev = &head->hitem, *next, *auxprev, *auxnext;
	int cmpval;

	/* level / newlevel are 1-counted here */
	newlevel = __builtin_ctz(random()) + 1;
	if (newlevel > SKIPLIST_MAXDEPTH)
		newlevel = SKIPLIST_MAXDEPTH;

	next = NULL;
	while (level >= newlevel) {
		next = sl_level_get(prev, level - 1);
		if (!next) {
			level--;
			continue;
		}
		cmpval = cmpfn(next, item);
		if (cmpval < 0) {
			prev = next;
			continue;
		} else if (cmpval == 0) {
			return next;
		}
		level--;
	}

	/* check for duplicate item - could be removed if code doesn't rely
	 * on it, but not really worth the complication. */
	auxlevel = level;
	auxprev = prev;
	while (auxlevel) {
		auxlevel--;
		auxnext = sl_level_get(auxprev, auxlevel);
		cmpval = 1;
		while (auxnext && (cmpval = cmpfn(auxnext, item)) < 0) {
			auxprev = auxnext;
			auxnext = sl_level_get(auxprev, auxlevel);
		}
		if (cmpval == 0)
			return auxnext;
	};

	head->count++;
	memset(item, 0, sizeof(*item));
	if (newlevel > SKIPLIST_EMBED) {
		struct sskip_overflow *oflow;
		oflow = XMALLOC(MTYPE_SKIPLIST_OFLOW, sizeof(void *)
				* (newlevel - SKIPLIST_OVERFLOW));
		item->next[SKIPLIST_OVERFLOW] = (struct sskip_item *)
				((uintptr_t)oflow | 1);
	}

	sl_level_set(item, level, next);
	sl_level_set(prev, level, item);
	/* level is now 0-counted and < newlevel */
	while (level) {
		level--;
		next = sl_level_get(prev, level);
		while (next && cmpfn(next, item) < 0) {
			prev = next;
			next = sl_level_get(prev, level);
		}

		sl_level_set(item, level, next);
		sl_level_set(prev, level, item);
	};
	return NULL;
}

/* NOTE: level counting below is 1-based since that makes the code simpler! */

struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b))
{
	size_t level = SKIPLIST_MAXDEPTH;
	struct sskip_item *prev = &head->hitem, *next;
	int cmpval;

	while (level) {
		next = sl_level_get(prev, level - 1);
		if (!next) {
			level--;
			continue;
		}
		cmpval = cmpfn(next, item);
		if (cmpval < 0) {
			prev = next;
			continue;
		}
		if (cmpval == 0)
			return next;
		level--;
	}
	return NULL;
}

struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b))
{
	size_t level = SKIPLIST_MAXDEPTH;
	struct sskip_item *prev = &head->hitem, *next;
	int cmpval;

	while (level) {
		next = sl_level_get(prev, level - 1);
		if (!next) {
			level--;
			continue;
		}
		cmpval = cmpfn(next, item);
		if (cmpval < 0) {
			prev = next;
			continue;
		}
		if (cmpval == 0)
			return next;
		level--;
	}
	return next;
}

struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b))
{
	size_t level = SKIPLIST_MAXDEPTH;
	struct sskip_item *prev = &head->hitem, *next, *best = NULL;
	int cmpval;

	while (level) {
		next = sl_level_get(prev, level - 1);
		if (!next) {
			level--;
			continue;
		}
		cmpval = cmpfn(next, item);
		if (cmpval < 0) {
			best = prev = next;
			continue;
		}
		level--;
	}
	return best;
}

void typesafe_skiplist_del(struct sskip_head *head, struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b))
{
	size_t level = SKIPLIST_MAXDEPTH;
	struct sskip_item *prev = &head->hitem, *next;
	int cmpval;

	while (level) {
		next = sl_level_get(prev, level - 1);
		if (!next) {
			level--;
			continue;
		}
		if (next == item) {
			sl_level_set(prev, level - 1,
					sl_level_get(item, level - 1));
			level--;
			continue;
		}
		cmpval = cmpfn(next, item);
		if (cmpval < 0) {
			prev = next;
			continue;
		}
		level--;
	}

	/* TBD: assert when trying to remove non-existing item? */
	head->count--;

	if ((uintptr_t)item->next[SKIPLIST_OVERFLOW] & 1) {
		uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
		ptrval &= UINTPTR_MAX - 3;
		struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
		XFREE(MTYPE_SKIPLIST_OFLOW, oflow);
	}
	memset(item, 0, sizeof(*item));
}
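
The overflow scheme used by sl_level_get()/sl_level_set() above packs a flag into the pointer itself: if the low bit of next[SKIPLIST_OVERFLOW] is set, the remaining bits (masked with UINTPTR_MAX - 3) point at a separately allocated struct sskip_overflow instead of an item. A standalone sketch of that tagging idiom (not part of this commit; it assumes allocations are at least 4-byte aligned, so the low two pointer bits are free):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static void *tag_ptr(void *p)		/* mark p as "overflow" */
{
	return (void *)((uintptr_t)p | 1);
}

static void *untag_ptr(void *p)		/* recover the real pointer */
{
	return (void *)((uintptr_t)p & (UINTPTR_MAX - 3));
}

int main(void)
{
	void *mem = malloc(16);
	void *tagged = tag_ptr(mem);

	assert(((uintptr_t)tagged & 1) && untag_ptr(tagged) == mem);
	free(mem);
	return 0;
}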

lib/typesafe.h (new file, 645 lines)
@ -0,0 +1,645 @@
/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "compiler.h"

/* generic macros for all list-like types */

#define for_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
#define for_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define for_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
			item; \
			item = from, from = prefix##_next_safe(head, from))

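Expanded for a hypothetical prefix mylist over struct item, for_each_safe above reads roughly as follows; the successor is fetched before the loop body runs, so the current item may be unlinked or freed inside the loop:

	struct item *item;

	for (struct item *mylist_safe =
			mylist_next_safe(head, (item = mylist_first(head)));
	     item;
	     item = mylist_safe,
	     mylist_safe = mylist_next_safe(head, mylist_safe))
		/* loop body may unlink/free item */;
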
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	size_t count;
};

static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	item->next = *pos;
	*pos = item;
	if (pos == head->last_next)
		head->last_next = &item->next;
	head->count++;
}

/* use as:
 *
 * PREDECL_LIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem)
 */
#define PREDECL_LIST(prefix) \
	struct prefix ## _head { struct slist_head sh; }; \
	struct prefix ## _item { struct slist_item si; };

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }

#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (!item->field.si.next) \
		h->sh.last_next = iter; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == NULL) \
		h->sh.last_next = &h->sh.first; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */

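Putting the pieces together, a minimal usage sketch of the list just declared (struct event and its values are made up for illustration; the usual libc includes are assumed):

PREDECL_LIST(evlist)

struct event {
	int id;
	struct evlist_item litem;	/* intrusive list linkage */
};

DECLARE_LIST(evlist, struct event, litem)

static void evlist_example(void)
{
	struct evlist_head head;
	struct event a = { .id = 1 }, b = { .id = 2 }, *ev;

	evlist_init(&head);
	evlist_add_tail(&head, &a);
	evlist_add_tail(&head, &b);	/* used as FIFO: a, then b */
	assert(evlist_count(&head) == 2);

	for_each(evlist, &head, ev)
		printf("event %d\n", ev->id);

	while ((ev = evlist_pop(&head)))
		;			/* dequeues a, then b */
	evlist_fini(&head);
}
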
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
	struct prefix ## _head { struct ssort_head sh; }; \
	struct prefix ## _item { struct ssort_item si; };

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)

#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
/* TODO: del_hint */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return; \
	h->sh.count--; \
	*iter = item->field.si.next; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */

#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
 \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
/* ... */

#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
/* ... */

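Since _add keeps the list ordered and _pop removes the head, a sorted list doubles as a simple priority queue. An illustrative sketch (names and values invented for the example):

PREDECL_SORTLIST_UNIQ(taskq)

struct task {
	int prio;
	struct taskq_item qitem;
};

static int task_cmp(const struct task *a, const struct task *b)
{
	return a->prio - b->prio;
}

DECLARE_SORTLIST_UNIQ(taskq, struct task, qitem, task_cmp)

static void taskq_example(void)
{
	struct taskq_head head;
	struct task t1 = { .prio = 3 }, t2 = { .prio = 1 };

	taskq_init(&head);
	taskq_add(&head, &t1);
	taskq_add(&head, &t2);
	assert(taskq_pop(&head) == &t2);	/* lowest prio first */
	assert(taskq_pop(&head) == &t1);
	taskq_fini(&head);
}
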
/* hash, "sorted" by hash value
|
||||
*/
|
||||
|
||||
/* don't use these structs directly */
|
||||
struct thash_item {
|
||||
struct thash_item *next;
|
||||
uint32_t hashval;
|
||||
};
|
||||
|
||||
struct thash_head {
|
||||
struct thash_item **entries;
|
||||
uint32_t count;
|
||||
|
||||
uint8_t tabshift;
|
||||
uint8_t minshift, maxshift;
|
||||
};
|
||||
|
||||
#define _HASH_SIZE(tabshift) \
|
||||
((1U << (tabshift)) >> 1)
|
||||
#define HASH_SIZE(head) \
|
||||
_HASH_SIZE((head).tabshift)
|
||||
#define _HASH_KEY(tabshift, val) \
|
||||
((val) >> (33 - (tabshift)))
|
||||
#define HASH_KEY(head, val) \
|
||||
_HASH_KEY((head).tabshift, val)
|
||||
#define HASH_GROW_THRESHOLD(head) \
|
||||
((head).count >= HASH_SIZE(head))
|
||||
#define HASH_SHRINK_THRESHOLD(head) \
|
||||
((head).count <= (HASH_SIZE(head) - 1) / 2)
|
||||
|
||||
extern void typesafe_hash_grow(struct thash_head *head);
|
||||
extern void typesafe_hash_shrink(struct thash_head *head);
|
||||
|
||||
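To make the encoding concrete: tabshift == 0 means "no table allocated"; otherwise the table holds 2^(tabshift-1) buckets and the bucket index is the top tabshift-1 bits of the 32-bit hash value. A standalone check of that arithmetic (the macro definitions are copied out of the header above):

#include <assert.h>

#define _HASH_SIZE(tabshift)	((1U << (tabshift)) >> 1)
#define _HASH_KEY(tabshift, val) ((val) >> (33 - (tabshift)))

int main(void)
{
	assert(_HASH_SIZE(5) == 16);		 /* 2^(5-1) buckets */
	assert(_HASH_KEY(5, 0xffffffffU) == 15); /* top 4 bits: last bucket */
	assert(_HASH_KEY(5, 0x10000000U) == 1);
	return 0;
}
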
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
	struct prefix ## _head { struct thash_head hh; }; \
	struct prefix ## _item { struct thash_item hi; };

#define INIT_HASH(var) { }

#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
 \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
		h->hh.count--; \
		return container_of(*np, type, field.hi); \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* ... */

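An illustrative sketch of declaring and using such a hash (all names are invented; jhash_1word stands in for whatever 32-bit hash function the caller prefers):

PREDECL_HASH(peermap)

struct peer {
	uint32_t addr;
	struct peermap_item hitem;
};

static int peer_cmp(const struct peer *a, const struct peer *b)
{
	return (a->addr > b->addr) - (a->addr < b->addr);
}

static uint32_t peer_hash(const struct peer *p)
{
	return jhash_1word(p->addr, 0xdeadbeef);
}

DECLARE_HASH(peermap, struct peer, hitem, peer_cmp, peer_hash)

static void peermap_example(void)
{
	struct peermap_head head;
	struct peer p = { .addr = 0x0a000001 }, ref = { .addr = 0x0a000001 };

	peermap_init(&head);
	peermap_add(&head, &p);	/* NULL on success; existing equal item otherwise */
	assert(peermap_find(&head, &ref) == &p);
	peermap_del(&head, &p);
	peermap_fini(&head);	/* asserts the hash is empty */
}
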
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
	struct prefix ## _head { struct sskip_head sh; }; \
	struct prefix ## _item { struct sskip_item si; };

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }

#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	typesafe_skiplist_del(&h->sh, &item->field.si, cmpfn_uq); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = h->sh.hitem.next[0]; \
	if (!sitem) \
		return NULL; \
	typesafe_skiplist_del(&h->sh, sitem, cmpfn_uq); \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */

#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp) \
/* ... */

#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */

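Note how the _NONUNIQ variants wrap the caller's compare function: items that compare equal are tie-broken by their addresses, so equal-keyed items stay distinct inside the container (and _add/_del address the exact node), while the lookup functions still use the plain compare. This is what the ospf6d/ospfd changes below rely on when replacing the old pqueue-based SPF candidate list. In sketch form (illustrative names, not part of the commit):

PREDECL_SKIPLIST_NONUNIQ(candq)

struct cand {
	uint32_t cost;
	struct candq_item qi;
};

static int cand_cmp(const struct cand *a, const struct cand *b)
{
	return (a->cost > b->cost) - (a->cost < b->cost);
}

DECLARE_SKIPLIST_NONUNIQ(candq, struct cand, qi, cand_cmp)

static void candq_example(void)
{
	struct candq_head head;
	struct cand a = { .cost = 10 }, b = { .cost = 10 }, *v;

	candq_init(&head);
	candq_add(&head, &a);
	candq_add(&head, &b);	/* accepted despite the equal cost */
	while ((v = candq_pop(&head)))
		;		/* pops both, cheapest first */
}
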
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b));
extern void typesafe_skiplist_del(struct sskip_head *head,
		struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
				const struct sskip_item *b));

/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */
#include "typerb.h"

#endif /* _FRR_TYPESAFE_H */

lib/zebra.h (39 lines changed)
@ -335,45 +335,6 @@ struct in_pktinfo {

#endif /* ndef BYTE_ORDER */

/* MAX / MIN are not commonly defined, but useful */
/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */
#ifdef MAX
#undef MAX
#endif
#define MAX(a, b) \
	({ \
		typeof(a) _max_a = (a); \
		typeof(b) _max_b = (b); \
		_max_a > _max_b ? _max_a : _max_b; \
	})
#ifdef MIN
#undef MIN
#endif
#define MIN(a, b) \
	({ \
		typeof(a) _min_a = (a); \
		typeof(b) _min_b = (b); \
		_min_a < _min_b ? _min_a : _min_b; \
	})

#ifndef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#endif

#ifndef container_of
#define container_of(ptr, type, member) \
	({ \
		const typeof(((type *)0)->member) *__mptr = (ptr); \
		(type *)((char *)__mptr - offsetof(type, member)); \
	})
#endif

#define ZEBRA_NUM_OF(x) (sizeof (x) / sizeof (x[0]))

/* For old definition. */
#ifndef IN6_ARE_ADDR_EQUAL
#define IN6_ARE_ADDR_EQUAL IN6_IS_ADDR_EQUAL

@ -55,7 +55,7 @@ struct zebra_privs_t nhrpd_privs = {
	.vty_group = VTY_GROUP,
#endif
	.caps_p = _caps_p,
	.cap_num_p = ZEBRA_NUM_OF(_caps_p),
	.cap_num_p = array_size(_caps_p),
};

static void parse_arguments(int argc, char **argv)

@ -102,7 +102,7 @@ int nhrp_vc_ipsec_updown(uint32_t child_id, struct nhrp_vc *vc)
{
	char buf[2][SU_ADDRSTRLEN];
	struct child_sa *sa = NULL, *lsa;
	uint32_t child_hash = child_id % ZEBRA_NUM_OF(childlist_head);
	uint32_t child_hash = child_id % array_size(childlist_head);
	int abort_migration = 0;

	list_for_each_entry(lsa, &childlist_head[child_hash], childlist_entry)
@ -202,7 +202,7 @@ void nhrp_vc_init(void)
	size_t i;

	nhrp_vc_hash = hash_create(nhrp_vc_key, nhrp_vc_cmp, "NHRP VC hash");
	for (i = 0; i < ZEBRA_NUM_OF(childlist_head); i++)
	for (i = 0; i < array_size(childlist_head); i++)
		list_init(&childlist_head[i]);
}

@ -211,7 +211,7 @@ void nhrp_vc_reset(void)
	struct child_sa *sa, *n;
	size_t i;

	for (i = 0; i < ZEBRA_NUM_OF(childlist_head); i++) {
	for (i = 0; i < array_size(childlist_head); i++) {
		list_for_each_entry_safe(sa, n, &childlist_head[i],
					 childlist_entry)
			nhrp_vc_ipsec_updown(sa->id, 0);

@ -171,7 +171,7 @@ static void ares_address_cb(void *arg, int status, int timeouts,
		return;
	}

	for (i = 0; i < ZEBRA_NUM_OF(addr) && he->h_addr_list[i] != NULL; i++) {
	for (i = 0; i < array_size(addr) && he->h_addr_list[i] != NULL; i++) {
		memset(&addr[i], 0, sizeof(addr[i]));
		addr[i].sa.sa_family = he->h_addrtype;
		switch (he->h_addrtype) {

@ -196,7 +196,7 @@ int zbufq_write(struct zbuf_queue *zbq, int fd)
		iov[iovcnt++] = (struct iovec){
			.iov_base = zb->head, .iov_len = zbuf_used(zb),
		};
		if (iovcnt >= ZEBRA_NUM_OF(iov))
		if (iovcnt >= array_size(iov))
			break;
	}

@ -27,7 +27,6 @@
#include "command.h"
#include "vty.h"
#include "prefix.h"
#include "pqueue.h"
#include "linklist.h"
#include "thread.h"
#include "lib_errors.h"
@ -76,16 +75,18 @@ static unsigned int ospf6_spf_get_ifindex_from_nh(struct ospf6_vertex *v)
	return 0;
}

static int ospf6_vertex_cmp(void *a, void *b)
static int ospf6_vertex_cmp(const struct ospf6_vertex *va,
			    const struct ospf6_vertex *vb)
{
	struct ospf6_vertex *va = (struct ospf6_vertex *)a;
	struct ospf6_vertex *vb = (struct ospf6_vertex *)b;

	/* ascending order */
	if (va->cost != vb->cost)
		return (va->cost - vb->cost);
	return (va->hops - vb->hops);
	if (va->hops != vb->hops)
		return (va->hops - vb->hops);
	return 0;
}
DECLARE_SKIPLIST_NONUNIQ(vertex_pqueue, struct ospf6_vertex, pqi,
			 ospf6_vertex_cmp)

static int ospf6_vertex_id_cmp(void *a, void *b)
{
@ -461,7 +462,7 @@ void ospf6_spf_calculation(uint32_t router_id,
			   struct ospf6_route_table *result_table,
			   struct ospf6_area *oa)
{
	struct pqueue *candidate_list;
	struct vertex_pqueue_head candidate_list;
	struct ospf6_vertex *root, *v, *w;
	int size;
	caddr_t lsdesc;
@ -481,8 +482,7 @@
	}

	/* initialize */
	candidate_list = pqueue_create();
	candidate_list->cmp = ospf6_vertex_cmp;
	vertex_pqueue_init(&candidate_list);

	root = ospf6_vertex_create(lsa);
	root->area = oa;
@ -492,13 +492,10 @@
	inet_pton(AF_INET6, "::1", &address);

	/* Actually insert root to the candidate-list as the only candidate */
	pqueue_enqueue(root, candidate_list);
	vertex_pqueue_add(&candidate_list, root);

	/* Iterate until candidate-list becomes empty */
	while (candidate_list->size) {
		/* get closest candidate from priority queue */
		v = pqueue_dequeue(candidate_list);

	while ((v = vertex_pqueue_pop(&candidate_list))) {
		/* installing may result in merging or rejecting of the vertex
		 */
		if (ospf6_spf_install(v, result_table) < 0)
@ -557,12 +554,11 @@
				zlog_debug(
					"  New candidate: %s hops %d cost %d",
					w->name, w->hops, w->cost);
				pqueue_enqueue(w, candidate_list);
				vertex_pqueue_add(&candidate_list, w);
			}
		}


	pqueue_delete(candidate_list);
	//vertex_pqueue_fini(&candidate_list);

	ospf6_remove_temp_router_lsa(oa);

@ -21,6 +21,7 @@
#ifndef OSPF6_SPF_H
#define OSPF6_SPF_H

#include "typesafe.h"
#include "ospf6_top.h"

/* Debug option */
@ -33,6 +34,7 @@ extern unsigned char conf_debug_ospf6_spf;
#define IS_OSPF6_DEBUG_SPF(level) \
	(conf_debug_ospf6_spf & OSPF6_DEBUG_SPF_##level)

PREDECL_SKIPLIST_NONUNIQ(vertex_pqueue)
/* Transit Vertex */
struct ospf6_vertex {
	/* type of this vertex */
@ -41,6 +43,8 @@ struct ospf6_vertex {
	/* Vertex Identifier */
	struct prefix vertex_id;

	struct vertex_pqueue_item pqi;

	/* Identifier String */
	char name[128];

@ -69,6 +69,8 @@ struct lsa_header {
	uint16_t length;
};

struct vertex;

/* OSPF LSA. */
struct ospf_lsa {
	/* LSA origination flag. */
@ -95,10 +97,7 @@ struct ospf_lsa {
	int lock;

	/* Flags for the SPF calculation. */
	int stat;
#define LSA_SPF_NOT_EXPLORED	-1
#define LSA_SPF_IN_SPFTREE	-2
	/* If stat >= 0, stat is LSA position in candidates heap. */
	struct vertex *stat;

	/* References to this LSA in neighbor retransmission lists*/
	int retransmit_counter;
@ -169,21 +169,6 @@ void ospf_lsdb_delete_all(struct ospf_lsdb *lsdb)
	}
}

void ospf_lsdb_clean_stat(struct ospf_lsdb *lsdb)
{
	struct route_table *table;
	struct route_node *rn;
	struct ospf_lsa *lsa;
	int i;

	for (i = OSPF_MIN_LSA; i < OSPF_MAX_LSA; i++) {
		table = lsdb->type[i].db;
		for (rn = route_top(table); rn; rn = route_next(rn))
			if ((lsa = (rn->info)) != NULL)
				lsa->stat = LSA_SPF_NOT_EXPLORED;
	}
}

struct ospf_lsa *ospf_lsdb_lookup(struct ospf_lsdb *lsdb, struct ospf_lsa *lsa)
{
	struct route_table *table;
@ -67,8 +67,6 @@ extern void ls_prefix_set(struct prefix_ls *lp, struct ospf_lsa *lsa);
extern void ospf_lsdb_add(struct ospf_lsdb *, struct ospf_lsa *);
extern void ospf_lsdb_delete(struct ospf_lsdb *, struct ospf_lsa *);
extern void ospf_lsdb_delete_all(struct ospf_lsdb *);
/* Set all stats to -1 (LSA_SPF_NOT_EXPLORED). */
extern void ospf_lsdb_clean_stat(struct ospf_lsdb *lsdb);
extern struct ospf_lsa *ospf_lsdb_lookup(struct ospf_lsdb *, struct ospf_lsa *);
extern struct ospf_lsa *ospf_lsdb_lookup_by_id(struct ospf_lsdb *, uint8_t,
					       struct in_addr, struct in_addr);

ospfd/ospf_spf.c (105 lines changed)
@ -30,7 +30,6 @@
#include "table.h"
#include "log.h"
#include "sockunion.h" /* for inet_ntop () */
#include "pqueue.h"

#include "ospfd/ospfd.h"
#include "ospfd/ospf_interface.h"
@ -53,6 +52,11 @@

static unsigned int spf_reason_flags = 0;

/* dummy vertex to flag "in spftree" */
static const struct vertex vertex_in_spftree = {};
#define LSA_SPF_IN_SPFTREE	(struct vertex *)&vertex_in_spftree
#define LSA_SPF_NOT_EXPLORED	NULL

static void ospf_clear_spf_reason_flags(void)
{
	spf_reason_flags = 0;
@ -72,35 +76,36 @@ static struct list vertex_list = {.del = ospf_vertex_free};

/* Heap related functions, for the managment of the candidates, to
 * be used with pqueue. */
static int cmp(void *node1, void *node2)
static int vertex_cmp(const struct vertex *v1, const struct vertex *v2)
{
	struct vertex *v1 = (struct vertex *)node1;
	struct vertex *v2 = (struct vertex *)node2;
	if (v1 != NULL && v2 != NULL) {
		/* network vertices must be chosen before router vertices of
		 * same
		 * cost in order to find all shortest paths
		 */
		if (((v1->distance - v2->distance) == 0)
		    && (v1->type != v2->type)) {
			switch (v1->type) {
			case OSPF_VERTEX_NETWORK:
				return -1;
			case OSPF_VERTEX_ROUTER:
				return 1;
			}
		} else
			return (v1->distance - v2->distance);
	if (v1->distance != v2->distance)
		return v1->distance - v2->distance;

	if (v1->type != v2->type) {
		switch (v1->type) {
		case OSPF_VERTEX_NETWORK:
			return -1;
		case OSPF_VERTEX_ROUTER:
			return 1;
		}
	}
	return 0;
}
DECLARE_SKIPLIST_NONUNIQ(vertex_pqueue, struct vertex, pqi, vertex_cmp)

static void update_stat(void *node, int position)
static void lsdb_clean_stat(struct ospf_lsdb *lsdb)
{
	struct vertex *v = node;
	struct route_table *table;
	struct route_node *rn;
	struct ospf_lsa *lsa;
	int i;

	/* Set the status of the vertex, when its position changes. */
	*(v->stat) = position;
	for (i = OSPF_MIN_LSA; i < OSPF_MAX_LSA; i++) {
		table = lsdb->type[i].db;
		for (rn = route_top(table); rn; rn = route_next(rn))
			if ((lsa = (rn->info)) != NULL)
				lsa->stat = LSA_SPF_NOT_EXPLORED;
	}
}

static struct vertex_nexthop *vertex_nexthop_new(void)
@ -179,7 +184,6 @@ static struct vertex *ospf_vertex_new(struct ospf_lsa *lsa)
	new = XCALLOC(MTYPE_OSPF_VERTEX, sizeof(struct vertex));

	new->flags = 0;
	new->stat = &(lsa->stat);
	new->type = lsa->data->type;
	new->id = lsa->data->id;
	new->lsa = lsa->data;
@ -187,6 +191,9 @@ static struct vertex *ospf_vertex_new(struct ospf_lsa *lsa)
	new->parents = list_new();
	new->parents->del = vertex_parent_free;
	new->parents->cmp = vertex_parent_cmp;
	new->lsa_p = lsa;

	lsa->stat = new;

	listnode_add(&vertex_list, new);

@ -786,7 +793,8 @@
 * path is found to a vertex already on the candidate list, store the new cost.
 */
static void ospf_spf_next(struct vertex *v, struct ospf *ospf,
			  struct ospf_area *area, struct pqueue *candidate)
			  struct ospf_area *area,
			  struct vertex_pqueue_head *candidate)
{
	struct ospf_lsa *w_lsa = NULL;
	uint8_t *p;
@ -935,13 +943,11 @@ static void ospf_spf_next(struct vertex *v, struct ospf *ospf,
			/* Calculate nexthop to W. */
			if (ospf_nexthop_calculation(area, v, w, l, distance,
						     lsa_pos))
				pqueue_enqueue(w, candidate);
				vertex_pqueue_add(candidate, w);
			else if (IS_DEBUG_OSPF_EVENT)
				zlog_debug("Nexthop Calc failed");
		} else if (w_lsa->stat >= 0) {
			/* Get the vertex from candidates. */
			w = candidate->array[w_lsa->stat];

		} else if (w_lsa->stat != LSA_SPF_IN_SPFTREE) {
			w = w_lsa->stat;
			/* if D is greater than. */
			if (w->distance < distance) {
				continue;
@ -962,18 +968,10 @@
			 * which
			 * will flush the old parents
			 */
			if (ospf_nexthop_calculation(area, v, w, l,
						     distance, lsa_pos))
				/* Decrease the key of the node in the
				 * heap.
				 * trickle-sort it up towards root, just
				 * in case this
				 * node should now be the new root due
				 * the cost change.
				 * (next pqueu_{de,en}queue will fully
				 * re-heap the queue).
				 */
				trickle_up(w_lsa->stat, candidate);
			vertex_pqueue_del(candidate, w);
			ospf_nexthop_calculation(area, v, w, l,
						 distance, lsa_pos);
			vertex_pqueue_add(candidate, w);
		}
	} /* end W is already on the candidate list */
} /* end loop over the links in V's LSA */
@ -1169,7 +1167,7 @@ static void ospf_spf_calculate(struct ospf *ospf, struct ospf_area *area,
			       struct route_table *new_table,
			       struct route_table *new_rtrs)
{
	struct pqueue *candidate;
	struct vertex_pqueue_head candidate;
	struct vertex *v;

	if (IS_DEBUG_OSPF_EVENT) {
@ -1194,11 +1192,9 @@

	/* This function scans all the LSA database and set the stat field to
	 * LSA_SPF_NOT_EXPLORED. */
	ospf_lsdb_clean_stat(area->lsdb);
	lsdb_clean_stat(area->lsdb);
	/* Create a new heap for the candidates. */
	candidate = pqueue_create();
	candidate->cmp = cmp;
	candidate->update = update_stat;
	vertex_pqueue_init(&candidate);

	/* Initialize the shortest-path tree to only the root (which is the
	   router doing the calculation). */
@ -1207,7 +1203,7 @@
	/* Set LSA position to LSA_SPF_IN_SPFTREE. This vertex is the root of
	 * the
	 * spanning tree. */
	*(v->stat) = LSA_SPF_IN_SPFTREE;
	v->lsa_p->stat = LSA_SPF_IN_SPFTREE;

	/* Set Area A's TransitCapability to FALSE. */
	area->transit = OSPF_TRANSIT_FALSE;
@ -1215,23 +1211,22 @@

	for (;;) {
		/* RFC2328 16.1. (2). */
		ospf_spf_next(v, ospf, area, candidate);
		ospf_spf_next(v, ospf, area, &candidate);

		/* RFC2328 16.1. (3). */
		/* If at this step the candidate list is empty, the shortest-
		   path tree (of transit vertices) has been completely built and
		   this stage of the procedure terminates. */
		if (candidate->size == 0)
			break;

		/* Otherwise, choose the vertex belonging to the candidate list
		   that is closest to the root, and add it to the shortest-path
		   tree (removing it from the candidate list in the
		   process). */
		/* Extract from the candidates the node with the lower key. */
		v = (struct vertex *)pqueue_dequeue(candidate);
		v = vertex_pqueue_pop(&candidate);
		if (!v)
			break;
		/* Update stat field in vertex. */
		*(v->stat) = LSA_SPF_IN_SPFTREE;
		v->lsa_p->stat = LSA_SPF_IN_SPFTREE;

		ospf_vertex_add_parent(v);

@ -1255,7 +1250,7 @@
	ospf_spf_process_stubs(area, area->spf, new_table, 0);

	/* Free candidate queue. */
	pqueue_delete(candidate);
	//vertex_pqueue_fini(&candidate);

	ospf_vertex_dump(__func__, area->spf, 0, 1);
	/* Free nexthop information, canonical versions of which are attached
@ -22,6 +22,8 @@
#ifndef _QUAGGA_OSPF_SPF_H
#define _QUAGGA_OSPF_SPF_H

#include "typesafe.h"

/* values for vertex->type */
#define OSPF_VERTEX_ROUTER 1  /* for a Router-LSA */
#define OSPF_VERTEX_NETWORK 2 /* for a Network-LSA */
@ -31,13 +33,15 @@

/* The "root" is the node running the SPF calculation */

PREDECL_SKIPLIST_NONUNIQ(vertex_pqueue)
/* A router or network in an area */
struct vertex {
	struct vertex_pqueue_item pqi;
	uint8_t flags;
	uint8_t type;		/* copied from LSA header */
	struct in_addr id;	/* copied from LSA header */
	struct ospf_lsa *lsa_p;
	struct lsa_header *lsa; /* Router or Network LSA */
	int *stat;		/* Link to LSA status. */
	uint32_t distance;	/* from root to this vertex */
	struct list *parents;	/* list of parents in SPF tree */
	struct list *children;	/* list of children in SPF tree*/

tests/.gitignore (3 lines changed, vendored)
@ -20,6 +20,7 @@
/lib/cli/test_commands_defun.c
/lib/northbound/test_oper_data
/lib/cxxcompat
/lib/test_atomlist
/lib/test_buffer
/lib/test_checksum
/lib/test_graph
@ -32,6 +33,7 @@
/lib/test_privs
/lib/test_ringbuf
/lib/test_segv
/lib/test_seqlock
/lib/test_sig
/lib/test_srcdest_table
/lib/test_stream
@ -39,6 +41,7 @@
/lib/test_timer_correctness
/lib/test_timer_performance
/lib/test_ttable
/lib/test_typelist
/lib/test_zlog
/lib/test_zmq
/ospf6d/test_lsdb
@ -28,21 +28,22 @@ static void test_lsp_build_list_nonzero_ht(void)

	area->lsp_mtu = 1500;

	dict_t *lspdb = lsp_db_init();
	struct lspdb_head _lspdb, *lspdb = &_lspdb;
	lsp_db_init(&_lspdb);

	struct isis_lsp *lsp1 = lsp_new(area, lsp_id1, 6000, 0, 0, 0, NULL,
					ISIS_LEVEL2);

	lsp_insert(lsp1, lspdb);
	lsp_insert(lspdb, lsp1);

	struct isis_lsp *lsp2 = lsp_new(area, lsp_id2, 6000, 0, 0, 0, NULL,
					ISIS_LEVEL2);

	lsp_insert(lsp2, lspdb);
	lsp_insert(lspdb, lsp2);

	struct list *list = list_new();

	lsp_build_list_nonzero_ht(lsp_id1, lsp_id_end, list, lspdb);
	lsp_build_list_nonzero_ht(lspdb, lsp_id1, lsp_id_end, list);
	assert(list->count == 1);
	assert(listgetdata(listhead(list)) == lsp1);
	list_delete_all_node(list);
@ -50,7 +51,7 @@ static void test_lsp_build_list_nonzero_ht(void)
	lsp_id_end[5] = 0x03;
	lsp_id_end[6] = 0x00;

	lsp_build_list_nonzero_ht(lsp_id1, lsp_id_end, list, lspdb);
	lsp_build_list_nonzero_ht(lspdb, lsp_id1, lsp_id_end, list);
	assert(list->count == 2);
	assert(listgetdata(listhead(list)) == lsp1);
	assert(listgetdata(listtail(list)) == lsp2);
@ -58,7 +59,7 @@ static void test_lsp_build_list_nonzero_ht(void)

	memcpy(lsp_id1, lsp_id2, sizeof(lsp_id1));

	lsp_build_list_nonzero_ht(lsp_id1, lsp_id_end, list, lspdb);
	lsp_build_list_nonzero_ht(lspdb, lsp_id1, lsp_id_end, list);
	assert(list->count == 1);
	assert(listgetdata(listhead(list)) == lsp2);
	list_delete_all_node(list);
@ -66,13 +67,13 @@ static void test_lsp_build_list_nonzero_ht(void)
	lsp_id1[5] = 0x03;
	lsp_id_end[5] = 0x04;

	lsp_build_list_nonzero_ht(lsp_id1, lsp_id_end, list, lspdb);
	lsp_build_list_nonzero_ht(lspdb, lsp_id1, lsp_id_end, list);
	assert(list->count == 0);
	list_delete_all_node(list);

	lsp_id1[5] = 0x00;

	lsp_build_list_nonzero_ht(lsp_id1, lsp_id_end, list, lspdb);
	lsp_build_list_nonzero_ht(lspdb, lsp_id1, lsp_id_end, list);
	assert(list->count == 2);
	assert(listgetdata(listhead(list)) == lsp1);
	assert(listgetdata(listtail(list)) == lsp2);
@ -32,7 +32,6 @@
#include "lib/debug.h"
#include "lib/distribute.h"
#include "lib/ferr.h"
#include "lib/fifo.h"
#include "lib/filter.h"
#include "lib/frr_pthread.h"
#include "lib/frratomic.h"

tests/lib/test_atomlist.c (new file, 404 lines)
@ -0,0 +1,404 @@
/*
 * Copyright (c) 2016-2018  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>

#include "atomlist.h"
#include "seqlock.h"
#include "monotime.h"

/*
 * maybe test:
 * - alist_del_hint
 * - alist_next_safe
 * - asort_del_hint
 * - asort_next_safe
 */

static struct seqlock sqlo;

PREDECL_ATOMLIST(alist)
PREDECL_ATOMSORT_UNIQ(asort)
struct item {
	uint64_t val1;
	struct alist_item chain;
	struct asort_item sortc;
	uint64_t val2;
};
DECLARE_ATOMLIST(alist, struct item, chain)

static int icmp(const struct item *a, const struct item *b);
DECLARE_ATOMSORT_UNIQ(asort, struct item, sortc, icmp)

static int icmp(const struct item *a, const struct item *b)
{
	if (a->val1 > b->val1)
		return 1;
	if (a->val1 < b->val1)
		return -1;
	return 0;
}

#define NITEM 10000
struct item itm[NITEM];

static struct alist_head ahead;
static struct asort_head shead;

#define NTHREADS 4
static struct testthread {
	pthread_t pt;
	struct seqlock sqlo;
	size_t counter, nullops;
} thr[NTHREADS];

struct testrun {
	struct testrun *next;
	int lineno;
	const char *desc;
	ssize_t prefill;
	bool sorted;
	void (*func)(unsigned int offset);
};
struct testrun *runs = NULL;

#define NOCLEAR -1

#define deftestrun(name, _desc, _prefill, _sorted) \
static void trfunc_##name(unsigned int offset); \
struct testrun tr_##name = { \
	.desc = _desc, \
	.lineno = __LINE__, \
	.prefill = _prefill, \
	.func = &trfunc_##name, \
	.sorted = _sorted }; \
static void __attribute__((constructor)) trsetup_##name(void) \
{ \
	struct testrun **inspos = &runs; \
	while (*inspos && (*inspos)->lineno < tr_##name.lineno) \
		inspos = &(*inspos)->next; \
	tr_##name.next = *inspos; \
	*inspos = &tr_##name; \
} \
static void trfunc_##name(unsigned int offset) \
{ \
	size_t i = 0, n = 0;

#define endtestrun \
	thr[offset].counter = i; \
	thr[offset].nullops = n; \
}

deftestrun(add, "add vs. add", 0, false)
	for (; i < NITEM / NTHREADS; i++)
		alist_add_head(&ahead, &itm[i * NTHREADS + offset]);
endtestrun

deftestrun(del, "del vs. del", NOCLEAR, false)
	for (; i < NITEM / NTHREADS / 10; i++)
		alist_del(&ahead, &itm[i * NTHREADS + offset]);
endtestrun

deftestrun(addtail, "add_tail vs. add_tail", 0, false)
	for (; i < NITEM / NTHREADS; i++)
		alist_add_tail(&ahead, &itm[i * NTHREADS + offset]);
endtestrun

deftestrun(pop, "pop vs. pop", NOCLEAR, false)
	for (; i < NITEM / NTHREADS; )
		if (alist_pop(&ahead))
			i++;
		else
			n++;
endtestrun

deftestrun(headN_vs_pop1, "add_head(N) vs. pop(1)", 1, false);
	if (offset == 0) {
		struct item *dr = NULL;

		for (i = n = 0; i < NITEM; ) {
			dr = alist_pop(&ahead);
			if (dr)
				i++;
			else
				n++;
		}
	} else {
		for (i = offset; i < NITEM; i += NTHREADS)
			alist_add_head(&ahead, &itm[i]);
		i = 0;
	}
endtestrun

deftestrun(head1_vs_popN, "add_head(1) vs. pop(N)", 0, false);
	if (offset < NTHREADS - 1) {
		struct item *dr = NULL;

		for (i = n = 0; i < NITEM / NTHREADS; ) {
			dr = alist_pop(&ahead);
			if (dr)
				i++;
			else
				n++;
		}
	} else {
		for (i = 0; i < NITEM; i++)
			alist_add_head(&ahead, &itm[i]);
		i = 0;
	}
endtestrun

deftestrun(headN_vs_popN, "add_head(N) vs. pop(N)", NTHREADS / 2, false)
	if (offset < NTHREADS / 2) {
		struct item *dr = NULL;

		for (i = n = 0; i < NITEM * 2 / NTHREADS; ) {
			dr = alist_pop(&ahead);
			if (dr)
				i++;
			else
				n++;
		}
	} else {
		for (i = offset; i < NITEM; i += NTHREADS)
			alist_add_head(&ahead, &itm[i]);
		i = 0;
	}
endtestrun

deftestrun(tailN_vs_pop1, "add_tail(N) vs. pop(1)", 1, false)
	if (offset == 0) {
		struct item *dr = NULL;

		for (i = n = 0; i < NITEM - (NITEM / NTHREADS); ) {
			dr = alist_pop(&ahead);
			if (dr)
				i++;
			else
				n++;
		}
	} else {
		for (i = offset; i < NITEM; i += NTHREADS)
			alist_add_tail(&ahead, &itm[i]);
		i = 0;
	}
endtestrun

deftestrun(tail1_vs_popN, "add_tail(1) vs. pop(N)", 0, false)
	if (offset < NTHREADS - 1) {
		struct item *dr = NULL;

		for (i = n = 0; i < NITEM / NTHREADS; ) {
			dr = alist_pop(&ahead);
			if (dr)
				i++;
			else
				n++;
		}
	} else {
		for (i = 0; i < NITEM; i++)
			alist_add_tail(&ahead, &itm[i]);
		i = 0;
	}
endtestrun

deftestrun(sort_add, "add_sort vs. add_sort", 0, true)
	for (; i < NITEM / NTHREADS / 10; i++)
		asort_add(&shead, &itm[i * NTHREADS + offset]);
endtestrun

deftestrun(sort_del, "del_sort vs. del_sort", NOCLEAR, true)
	for (; i < NITEM / NTHREADS / 10; i++)
		asort_del(&shead, &itm[i * NTHREADS + offset]);
endtestrun

deftestrun(sort_add_del, "add_sort vs. del_sort", NTHREADS / 2, true)
	if (offset < NTHREADS / 2) {
		for (; i < NITEM / NTHREADS / 10; i++)
			asort_del(&shead, &itm[i * NTHREADS + offset]);
	} else {
		for (; i < NITEM / NTHREADS / 10; i++)
			asort_add(&shead, &itm[i * NTHREADS + offset]);
	}
endtestrun
|
||||
|
||||
static void *thr1func(void *arg)
|
||||
{
|
||||
struct testthread *p = arg;
|
||||
unsigned int offset = (unsigned int)(p - &thr[0]);
|
||||
seqlock_val_t sv;
|
||||
struct testrun *tr;
|
||||
|
||||
for (tr = runs; tr; tr = tr->next) {
|
||||
sv = seqlock_bump(&p->sqlo);
|
||||
seqlock_wait(&sqlo, sv);
|
||||
|
||||
tr->func(offset);
|
||||
}
|
||||
seqlock_bump(&p->sqlo);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void clear_list(size_t prefill)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
memset(&ahead, 0, sizeof(ahead));
|
||||
memset(&shead, 0, sizeof(shead));
|
||||
memset(itm, 0, sizeof(itm));
|
||||
for (i = 0; i < NITEM; i++) {
|
||||
itm[i].val1 = itm[i].val2 = i;
|
||||
if ((i % NTHREADS) < prefill) {
|
||||
alist_add_tail(&ahead, &itm[i]);
|
||||
asort_add(&shead, &itm[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void run_tr(struct testrun *tr)
|
||||
{
|
||||
const char *desc = tr->desc;
|
||||
struct timeval tv;
|
||||
int64_t delta;
|
||||
seqlock_val_t sv;
|
||||
size_t c = 0, s = 0, n = 0;
|
||||
struct item *item, *prev, dummy;
|
||||
|
||||
printf("[%02u] %35s %s\n", seqlock_cur(&sqlo) >> 1, "", desc);
|
||||
fflush(stdout);
|
||||
|
||||
if (tr->prefill != NOCLEAR)
|
||||
clear_list(tr->prefill);
|
||||
|
||||
monotime(&tv);
|
||||
sv = seqlock_bump(&sqlo);
|
||||
for (size_t i = 0; i < NTHREADS; i++) {
|
||||
seqlock_wait(&thr[i].sqlo, seqlock_cur(&sqlo));
|
||||
s += thr[i].counter;
|
||||
n += thr[i].nullops;
|
||||
thr[i].counter = 0;
|
||||
thr[i].nullops = 0;
|
||||
}
|
||||
|
||||
delta = monotime_since(&tv, NULL);
|
||||
if (tr->sorted) {
|
||||
uint64_t prevval = 0;
|
||||
|
||||
for_each(asort, &shead, item) {
|
||||
assert(item->val1 >= prevval);
|
||||
prevval = item->val1;
|
||||
c++;
|
||||
}
|
||||
assert(c == asort_count(&shead));
|
||||
} else {
|
||||
prev = &dummy;
|
||||
for_each(alist, &ahead, item) {
|
||||
assert(item != prev);
|
||||
prev = item;
|
||||
c++;
|
||||
assert(c <= NITEM);
|
||||
}
|
||||
assert(c == alist_count(&ahead));
|
||||
}
|
||||
printf("\033[1A[%02u] %9"PRId64"us c=%5zu s=%5zu n=%5zu %s\n",
|
||||
sv >> 1, delta, c, s, n, desc);
|
||||
}
|
||||
|
||||
#ifdef BASIC_TESTS
|
||||
static void dump(const char *lbl)
|
||||
{
|
||||
struct item *item, *safe;
|
||||
size_t ctr = 0;
|
||||
|
||||
printf("dumping %s:\n", lbl);
|
||||
for_each_safe(alist, &ahead, item) {
|
||||
printf("%s %3zu %p %3"PRIu64" %3"PRIu64"\n", lbl, ctr++,
|
||||
(void *)item, item->val1, item->val2);
|
||||
}
|
||||
}
|
||||
|
||||
static void basic_tests(void)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
memset(&ahead, 0, sizeof(ahead));
|
||||
memset(itm, 0, sizeof(itm));
|
||||
for (i = 0; i < NITEM; i++)
|
||||
itm[i].val1 = itm[i].val2 = i;
|
||||
|
||||
assert(alist_first(&ahead) == NULL);
|
||||
dump("");
|
||||
alist_add_head(&ahead, &itm[0]);
|
||||
dump("");
|
||||
alist_add_head(&ahead, &itm[1]);
|
||||
dump("");
|
||||
alist_add_tail(&ahead, &itm[2]);
|
||||
dump("");
|
||||
alist_add_tail(&ahead, &itm[3]);
|
||||
dump("");
|
||||
alist_del(&ahead, &itm[1]);
|
||||
dump("");
|
||||
printf("POP: %p\n", alist_pop(&ahead));
|
||||
dump("");
|
||||
printf("POP: %p\n", alist_pop(&ahead));
|
||||
printf("POP: %p\n", alist_pop(&ahead));
|
||||
printf("POP: %p\n", alist_pop(&ahead));
|
||||
printf("POP: %p\n", alist_pop(&ahead));
|
||||
dump("");
|
||||
}
|
||||
#else
|
||||
#define basic_tests() do { } while (0)
|
||||
#endif
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
basic_tests();
|
||||
|
||||
seqlock_init(&sqlo);
|
||||
seqlock_acquire_val(&sqlo, 1);
|
||||
|
||||
for (i = 0; i < NTHREADS; i++) {
|
||||
seqlock_init(&thr[i].sqlo);
|
||||
seqlock_acquire(&thr[i].sqlo, &sqlo);
|
||||
thr[i].counter = 0;
|
||||
thr[i].nullops = 0;
|
||||
|
||||
pthread_create(&thr[i].pt, NULL, thr1func, &thr[i]);
|
||||
}
|
||||
|
||||
struct testrun *tr;
|
||||
|
||||
for (tr = runs; tr; tr = tr->next)
|
||||
run_tr(tr);
|
||||
|
||||
for (i = 0; i < NTHREADS; i++)
|
||||
pthread_join(thr[i].pt, NULL);
|
||||
|
||||
return 0;
|
||||
}
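
A note on the deftestrun()/endtestrun pair above: each invocation defines one test body and, through a GCC constructor, splices its struct testrun into the global runs list ordered by __LINE__, which is why main() can simply walk runs. As an illustrative sketch (hand-expanded, not text from the commit), the first invocation unfolds to roughly:

/* hand expansion of: deftestrun(add, "add vs. add", 0, false) ... endtestrun */
static void trfunc_add(unsigned int offset);
struct testrun tr_add = {
	.desc = "add vs. add",
	.lineno = 123,		/* placeholder: __LINE__ at the call site */
	.prefill = 0,
	.func = &trfunc_add,
	.sorted = false };

/* runs before main(), inserting tr_add into "runs" sorted by lineno */
static void __attribute__((constructor)) trsetup_add(void)
{
	struct testrun **inspos = &runs;

	while (*inspos && (*inspos)->lineno < tr_add.lineno)
		inspos = &(*inspos)->next;
	tr_add.next = *inspos;
	*inspos = &tr_add;
}

static void trfunc_add(unsigned int offset)
{
	size_t i = 0, n = 0;

	/* the body written between deftestrun and endtestrun */
	for (; i < NITEM / NTHREADS; i++)
		alist_add_head(&ahead, &itm[i * NTHREADS + offset]);

	/* appended by endtestrun */
	thr[offset].counter = i;
	thr[offset].nullops = n;
}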
6
tests/lib/test_atomlist.py
Normal file
@ -0,0 +1,6 @@
import frrtest

class TestAtomlist(frrtest.TestMultiOut):
    program = './test_atomlist'

TestAtomlist.exit_cleanly()
122
tests/lib/test_seqlock.c
Normal file
@ -0,0 +1,122 @@
/*
 * basic test for seqlock
 *
 * Copyright (C) 2015  David Lamparter
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/uio.h>

#include "monotime.h"
#include "seqlock.h"

static struct seqlock sqlo;
static pthread_t thr1;
static struct timeval start;

static void writestr(const char *str)
{
	struct iovec iov[2];
	char buf[32];
	int64_t usec = monotime_since(&start, NULL);

	snprintf(buf, sizeof(buf), "[%02"PRId64"] ", usec / 100000);

	iov[0].iov_base = buf;
	iov[0].iov_len = strlen(buf);
	iov[1].iov_base = (char *)str;
	iov[1].iov_len = strlen(str);
	writev(1, iov, 2);
}

static void *thr1func(void *arg)
{
	assert(!seqlock_held(&sqlo));
	assert(seqlock_check(&sqlo, 1));
	seqlock_wait(&sqlo, 1);
	writestr("thr1 (unheld)\n");

	sleep(2);

	assert(seqlock_held(&sqlo));
	assert(seqlock_check(&sqlo, 1));
	seqlock_wait(&sqlo, 1);
	writestr("thr1 @1\n");

	seqlock_wait(&sqlo, 3);
	writestr("thr1 @3\n");

	seqlock_wait(&sqlo, 5);
	writestr("thr1 @5\n");

	seqlock_wait(&sqlo, 7);
	writestr("thr1 @7\n");

	seqlock_wait(&sqlo, 9);
	writestr("thr1 @9\n");

	seqlock_wait(&sqlo, 11);
	writestr("thr1 @11\n");
	return NULL;
}

int main(int argc, char **argv)
{
	monotime(&start);

	seqlock_init(&sqlo);

	assert(!seqlock_held(&sqlo));
	seqlock_acquire_val(&sqlo, 1);
	assert(seqlock_held(&sqlo));

	assert(seqlock_cur(&sqlo) == 1);
	assert(seqlock_bump(&sqlo) == 1);
	assert(seqlock_cur(&sqlo) == 3);
	assert(seqlock_bump(&sqlo) == 3);
	assert(seqlock_bump(&sqlo) == 5);
	assert(seqlock_bump(&sqlo) == 7);
	assert(seqlock_cur(&sqlo) == 9);

	assert(seqlock_held(&sqlo));
	seqlock_release(&sqlo);
	assert(!seqlock_held(&sqlo));

	pthread_create(&thr1, NULL, thr1func, NULL);
	sleep(1);

	writestr("main @3\n");
	seqlock_acquire_val(&sqlo, 3);
	sleep(2);

	writestr("main @5\n");
	seqlock_bump(&sqlo);
	sleep(1);

	writestr("main @9\n");
	seqlock_acquire_val(&sqlo, 9);
	sleep(1);

	writestr("main @release\n");
	seqlock_release(&sqlo);
	sleep(1);
}
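
The asserts in main() above document the counting convention this test depends on: valid seqlock positions are odd numbers, seqlock_bump() returns the previous position and advances it by 2, and seqlock_wait() returns once the lock has reached the requested position or is not held at all. A minimal writer-side sketch, assuming only the calls exercised in this test:

/* illustrative only; mirrors the sequence of asserts in main() above */
static void seqlock_writer_sketch(struct seqlock *sq)
{
	seqlock_init(sq);
	seqlock_acquire_val(sq, 1);	/* held at position 1 (odd) */
	/* ... publish whatever state position 1 stands for ... */
	seqlock_bump(sq);		/* returns 1; position is now 3, so
					 * seqlock_wait(sq, 3) callers proceed */
	seqlock_release(sq);		/* unheld: remaining waiters unblock */
}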
142
tests/lib/test_typelist.c
Normal file
@ -0,0 +1,142 @@
/*
 * Copyright (c) 2016-2018  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>

#define WNO_ATOMLIST_UNSAFE_FIND

#include "typesafe.h"
#include "atomlist.h"
#include "memory.h"
#include "monotime.h"

#include "tests/helpers/c/prng.h"

/* note: these macros are layered 2-deep because that makes the C
 * preprocessor expand the "type" argument.  Otherwise, you get
 * "PREDECL_type" instead of "PREDECL_LIST"
 */
#define _concat(a, b) a ## b
#define concat(a, b) _concat(a, b)
#define _str(x) #x
#define str(x) _str(x)

#define _PREDECL(type, ...) PREDECL_##type(__VA_ARGS__)
#define PREDECL(type, ...) _PREDECL(type, __VA_ARGS__)
#define _DECLARE(type, ...) DECLARE_##type(__VA_ARGS__)
#define DECLARE(type, ...) _DECLARE(type, __VA_ARGS__)

#define _U_SORTLIST_UNIQ 1
#define _U_SORTLIST_NONUNIQ 0
#define _U_HASH 1
#define _U_SKIPLIST_UNIQ 1
#define _U_SKIPLIST_NONUNIQ 0
#define _U_RBTREE_UNIQ 1
#define _U_RBTREE_NONUNIQ 0
#define _U_ATOMSORT_UNIQ 1
#define _U_ATOMSORT_NONUNIQ 0

#define _IS_UNIQ(type) _U_##type
#define IS_UNIQ(type) _IS_UNIQ(type)

#define _H_SORTLIST_UNIQ 0
#define _H_SORTLIST_NONUNIQ 0
#define _H_HASH 1
#define _H_SKIPLIST_UNIQ 0
#define _H_SKIPLIST_NONUNIQ 0
#define _H_RBTREE_UNIQ 0
#define _H_RBTREE_NONUNIQ 0
#define _H_ATOMSORT_UNIQ 0
#define _H_ATOMSORT_NONUNIQ 0

#define _IS_HASH(type) _H_##type
#define IS_HASH(type) _IS_HASH(type)

static struct timeval ref, ref0;

static void ts_start(void)
{
	monotime(&ref0);
	monotime(&ref);
}
static void ts_ref(const char *text)
{
	int64_t us;
	us = monotime_since(&ref, NULL);
	printf("%7"PRId64"us %s\n", us, text);
	monotime(&ref);
}
static void ts_end(void)
{
	int64_t us;
	us = monotime_since(&ref0, NULL);
	printf("%7"PRId64"us total\n", us);
}

#define TYPE SORTLIST_UNIQ
#include "test_typelist.h"

#define TYPE SORTLIST_NONUNIQ
#include "test_typelist.h"

#define TYPE HASH
#include "test_typelist.h"

#define TYPE SKIPLIST_UNIQ
#include "test_typelist.h"

#define TYPE SKIPLIST_NONUNIQ
#include "test_typelist.h"

#define TYPE RBTREE_UNIQ
#include "test_typelist.h"

#define TYPE RBTREE_NONUNIQ
#include "test_typelist.h"

#define TYPE ATOMSORT_UNIQ
#include "test_typelist.h"

#define TYPE ATOMSORT_NONUNIQ
#include "test_typelist.h"

int main(int argc, char **argv)
{
	srandom(1);

	test_SORTLIST_UNIQ();
	test_SORTLIST_NONUNIQ();
	test_HASH();
	test_SKIPLIST_UNIQ();
	test_SKIPLIST_NONUNIQ();
	test_RBTREE_UNIQ();
	test_RBTREE_NONUNIQ();
	test_ATOMSORT_UNIQ();
	test_ATOMSORT_NONUNIQ();

	log_memstats_stderr("test: ");
	return 0;
}
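
The "layered 2-deep" comment above is doing real work: the ## and # operators paste/stringize the literal token TYPE before it can be macro-expanded, so a single layer would never see the actual type name. A worked expansion with TYPE defined as RBTREE_UNIQ (illustrative; BAD_PREDECL is a made-up name):

/*
 * single layer -- ## pastes before expanding its operand:
 *	#define BAD_PREDECL(type, ...) PREDECL_##type(__VA_ARGS__)
 *	BAD_PREDECL(TYPE, list)	=> PREDECL_TYPE(list)		(wrong)
 *
 * two layers, as above -- TYPE expands on the way into _PREDECL:
 *	PREDECL(TYPE, list)	=> _PREDECL(RBTREE_UNIQ, list)
 *				=> PREDECL_RBTREE_UNIQ(list)	(intended)
 *
 * str(TYPE) works the same way, yielding "RBTREE_UNIQ" rather than "TYPE".
 */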
272
tests/lib/test_typelist.h
Normal file
@ -0,0 +1,272 @@
/*
 * Copyright (c) 2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* C++ called, they want their templates back */
#define item concat(item_, TYPE)
#define itm concat(itm_, TYPE)
#define head concat(head_, TYPE)
#define list concat(TYPE, )
#define list_head concat(TYPE, _head)
#define list_item concat(TYPE, _item)
#define list_cmp concat(TYPE, _cmp)
#define list_hash concat(TYPE, _hash)
#define list_init concat(TYPE, _init)
#define list_fini concat(TYPE, _fini)
#define list_first concat(TYPE, _first)
#define list_next concat(TYPE, _next)
#define list_next_safe concat(TYPE, _next_safe)
#define list_count concat(TYPE, _count)
#define list_add concat(TYPE, _add)
#define list_find concat(TYPE, _find)
#define list_find_lt concat(TYPE, _find_lt)
#define list_find_gteq concat(TYPE, _find_gteq)
#define list_del concat(TYPE, _del)
#define list_pop concat(TYPE, _pop)

PREDECL(TYPE, list)
struct item {
	uint64_t val;
	struct list_item itm;
	int scratchpad;
};

static int list_cmp(const struct item *a, const struct item *b);

#if IS_HASH(TYPE)
static uint32_t list_hash(const struct item *a);
DECLARE(TYPE, list, struct item, itm, list_cmp, list_hash)

static uint32_t list_hash(const struct item *a)
{
	/* crappy hash to get some hash collisions */
	return a->val ^ (a->val << 29) ^ 0x55AA0000U;
}

#else
DECLARE(TYPE, list, struct item, itm, list_cmp)
#endif

static int list_cmp(const struct item *a, const struct item *b)
{
	if (a->val > b->val)
		return 1;
	if (a->val < b->val)
		return -1;
	return 0;
}

#define NITEM 10000
struct item itm[NITEM];
static struct list_head head = concat(INIT_, TYPE)(head);

static void concat(test_, TYPE)(void)
{
	size_t i, j, k, l;
	struct prng *prng;
	struct item *item, *prev;
	struct item dummy;

	memset(itm, 0, sizeof(itm));
	for (i = 0; i < NITEM; i++)
		itm[i].val = i;

	printf("%s start\n", str(TYPE));
	ts_start();

	list_init(&head);
	ts_ref("init");

	assert(list_first(&head) == NULL);

	prng = prng_new(0);
	k = 0;
	for (i = 0; i < NITEM; i++) {
		j = prng_rand(prng) % NITEM;
		if (itm[j].scratchpad == 0) {
			list_add(&head, &itm[j]);
			itm[j].scratchpad = 1;
			k++;
		} else
			assert(list_add(&head, &itm[j]) == &itm[j]);
	}
	assert(list_count(&head) == k);
	assert(list_first(&head) != NULL);
	ts_ref("fill");

	k = 0;
	prev = NULL;
	for_each(list, &head, item) {
#if IS_HASH(TYPE)
		/* hash table doesn't give sorting */
		(void)prev;
#else
		assert(!prev || prev->val < item->val);
#endif
		prev = item;
		k++;
	}
	assert(list_count(&head) == k);
	ts_ref("walk");

#if IS_UNIQ(TYPE)
	prng_free(prng);
	prng = prng_new(0);

	for (i = 0; i < NITEM; i++) {
		j = prng_rand(prng) % NITEM;
		dummy.val = j;
		assert(list_find(&head, &dummy) == &itm[j]);
	}
	ts_ref("find");

	for (i = 0; i < NITEM; i++) {
		j = prng_rand(prng) % NITEM;
		memset(&dummy, 0, sizeof(dummy));
		dummy.val = j;
		if (itm[j].scratchpad)
			assert(list_add(&head, &dummy) == &itm[j]);
		else {
			assert(list_add(&head, &dummy) == NULL);
			list_del(&head, &dummy);
		}
	}
	ts_ref("add-dup");
#else /* !IS_UNIQ(TYPE) */
	for (i = 0; i < NITEM; i++) {
		j = prng_rand(prng) % NITEM;
		memset(&dummy, 0, sizeof(dummy));
		dummy.val = j;

		list_add(&head, &dummy);
		if (itm[j].scratchpad) {
			struct item *lt, *gteq, dummy2;

			assert(list_next(&head, &itm[j]) == &dummy ||
				list_next(&head, &dummy) == &itm[j]);

			memset(&dummy2, 0, sizeof(dummy));
			dummy2.val = j;
			lt = list_find_lt(&head, &dummy2);
			gteq = list_find_gteq(&head, &dummy2);

			assert(gteq == &itm[j] || gteq == &dummy);
			if (lt)
				assert(list_next(&head, lt) == &itm[j] ||
					list_next(&head, lt) == &dummy);
			else
				assert(list_first(&head) == &itm[j] ||
					list_first(&head) == &dummy);
		} else if (list_next(&head, &dummy))
			assert(list_next(&head, &dummy)->val > j);
		list_del(&head, &dummy);
	}
	ts_ref("add-dup+find_{lt,gteq}");
#endif
#if !IS_HASH(TYPE)
	prng_free(prng);
	prng = prng_new(123456);

	l = 0;
	for (i = 0; i < NITEM; i++) {
		struct item *lt, *gteq, *tmp;

		j = prng_rand(prng) % NITEM;
		dummy.val = j;

		lt = list_find_lt(&head, &dummy);
		gteq = list_find_gteq(&head, &dummy);

		if (lt) {
			assert(lt->val < j);
			tmp = list_next(&head, lt);
			assert(tmp == gteq);
			assert(!tmp || tmp->val >= j);
		} else
			assert(gteq == list_first(&head));

		if (gteq)
			assert(gteq->val >= j);
	}
	ts_ref("find_{lt,gteq}");
#endif /* !IS_HASH */

	prng_free(prng);
	prng = prng_new(0);

	l = 0;
	for (i = 0; i < NITEM; i++) {
		(void)prng_rand(prng);
		j = prng_rand(prng) % NITEM;
		if (itm[j].scratchpad == 1) {
			list_del(&head, &itm[j]);
			itm[j].scratchpad = 0;
			l++;
		}
	}
	assert(l + list_count(&head) == k);
	ts_ref("del");

	for_each_safe(list, &head, item) {
		assert(item->scratchpad != 0);

		if (item->val & 1) {
			list_del(&head, item);
			item->scratchpad = 0;
			l++;
		}
	}
	assert(l + list_count(&head) == k);
	ts_ref("for_each_safe+del");

	while ((item = list_pop(&head))) {
		assert(item->scratchpad != 0);

		item->scratchpad = 0;
		l++;
	}
	assert(l == k);
	assert(list_count(&head) == 0);
	assert(list_first(&head) == NULL);
	ts_ref("pop");

	list_fini(&head);
	ts_ref("fini");
	ts_end();
	printf("%s end\n", str(TYPE));
}

#undef item
#undef itm
#undef head
#undef list
#undef list_head
#undef list_item
#undef list_cmp
#undef list_hash
#undef list_init
#undef list_fini
#undef list_first
#undef list_next
#undef list_next_safe
#undef list_count
#undef list_add
#undef list_find
#undef list_find_lt
#undef list_find_gteq
#undef list_del
#undef list_pop

#undef TYPE
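
Read without the concat() indirection, each inclusion of this header instantiates the pattern below. This sketch uses made-up names (myset, struct myitem) and the RBTREE_UNIQ flavour; the generated myset_*() functions correspond to what the test calls through list_*():

#include "typesafe.h"

PREDECL_RBTREE_UNIQ(myset)

struct myitem {
	uint64_t val;
	struct myset_item ref;	/* embedded node, like "itm" above */
};

static int myitem_cmp(const struct myitem *a, const struct myitem *b)
{
	if (a->val > b->val)
		return 1;
	if (a->val < b->val)
		return -1;
	return 0;
}

DECLARE_RBTREE_UNIQ(myset, struct myitem, ref, myitem_cmp)

/* usage, as exercised by the test above:
 *	struct myset_head h;
 *	myset_init(&h);
 *	myset_add(&h, &it);	// on a _UNIQ type, returns the
 *				// already-present equal item, if any
 *	struct myitem *m = myset_pop(&h);
 *	myset_fini(&h);
 */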
14
tests/lib/test_typelist.py
Normal file
@ -0,0 +1,14 @@
import frrtest

class TestTypelist(frrtest.TestMultiOut):
    program = './test_typelist'

TestTypelist.onesimple('SORTLIST_UNIQ end')
TestTypelist.onesimple('SORTLIST_NONUNIQ end')
TestTypelist.onesimple('HASH end')
TestTypelist.onesimple('SKIPLIST_UNIQ end')
TestTypelist.onesimple('SKIPLIST_NONUNIQ end')
TestTypelist.onesimple('RBTREE_UNIQ end')
TestTypelist.onesimple('RBTREE_NONUNIQ end')
TestTypelist.onesimple('ATOMSORT_UNIQ end')
TestTypelist.onesimple('ATOMSORT_NONUNIQ end')
@ -47,6 +47,7 @@ tests/ospf6d/test_lsdb-test_lsdb.$(OBJEXT): tests/ospf6d/test_lsdb_clippy.c

check_PROGRAMS = \
	tests/lib/cxxcompat \
	tests/lib/test_atomlist \
	tests/lib/test_buffer \
	tests/lib/test_checksum \
	tests/lib/test_heavy_thread \
@ -59,12 +60,14 @@ check_PROGRAMS = \
	tests/lib/test_ringbuf \
	tests/lib/test_srcdest_table \
	tests/lib/test_segv \
	tests/lib/test_seqlock \
	tests/lib/test_sig \
	tests/lib/test_stream \
	tests/lib/test_table \
	tests/lib/test_timer_correctness \
	tests/lib/test_timer_performance \
	tests/lib/test_ttable \
	tests/lib/test_typelist \
	tests/lib/test_zlog \
	tests/lib/test_graph \
	tests/lib/cli/test_cli \
@ -103,6 +106,7 @@ noinst_HEADERS += \
	tests/helpers/c/prng.h \
	tests/helpers/c/tests.h \
	tests/lib/cli/common_cli.h \
	tests/lib/test_typelist.h \
	# end

#
@ -189,6 +193,10 @@ tests_lib_northbound_test_oper_data_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_northbound_test_oper_data_LDADD = $(ALL_TESTS_LDADD)
tests_lib_northbound_test_oper_data_SOURCES = tests/lib/northbound/test_oper_data.c
nodist_tests_lib_northbound_test_oper_data_SOURCES = yang/frr-test-module.yang.c
tests_lib_test_atomlist_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_atomlist_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_atomlist_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_atomlist_SOURCES = tests/lib/test_atomlist.c
tests_lib_test_buffer_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_buffer_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_buffer_LDADD = $(ALL_TESTS_LDADD)
@ -236,6 +244,10 @@ tests_lib_test_segv_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_segv_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_segv_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_segv_SOURCES = tests/lib/test_segv.c
tests_lib_test_seqlock_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_seqlock_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_seqlock_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_seqlock_SOURCES = tests/lib/test_seqlock.c
tests_lib_test_sig_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_sig_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_sig_LDADD = $(ALL_TESTS_LDADD)
@ -264,6 +276,10 @@ tests_lib_test_ttable_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_ttable_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_ttable_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_ttable_SOURCES = tests/lib/test_ttable.c
tests_lib_test_typelist_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_typelist_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_typelist_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_typelist_SOURCES = tests/lib/test_typelist.c tests/helpers/c/prng.c
tests_lib_test_zlog_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_zlog_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_zlog_LDADD = $(ALL_TESTS_LDADD)
@ -301,6 +317,7 @@ EXTRA_DIST += \
	tests/lib/northbound/test_oper_data.in \
	tests/lib/northbound/test_oper_data.py \
	tests/lib/northbound/test_oper_data.refout \
	tests/lib/test_atomlist.py \
	tests/lib/test_nexthop_iter.py \
	tests/lib/test_ringbuf.py \
	tests/lib/test_srcdest_table.py \
@ -310,6 +327,7 @@ EXTRA_DIST += \
	tests/lib/test_timer_correctness.py \
	tests/lib/test_ttable.py \
	tests/lib/test_ttable.refout \
	tests/lib/test_typelist.py \
	tests/lib/test_zlog.py \
	tests/lib/test_graph.py \
	tests/lib/test_graph.refout \
@ -158,7 +158,7 @@ static int netlink_route_info_add_nh(netlink_route_info_t *ri,
	memset(&nhi, 0, sizeof(nhi));
	src = NULL;

	if (ri->num_nhs >= (int)ZEBRA_NUM_OF(ri->nhs))
	if (ri->num_nhs >= (int)array_size(ri->nhs))
		return 0;

	nhi.recursive = nexthop->rparent ? 1 : 0;

@ -176,7 +176,7 @@ static Fpm__AddRoute *create_add_route_message(qpb_allocator_t *allocator,
		if (num_nhs >= multipath_num)
			break;

		if (num_nhs >= ZEBRA_NUM_OF(nexthops))
		if (num_nhs >= array_size(nexthops))
			break;

		if (nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {

@ -3457,7 +3457,7 @@ struct route_table *rib_tables_iter_next(rib_tables_iter_t *iter)
		while (1) {

			while (iter->afi_safi_ix
			       < (int)ZEBRA_NUM_OF(afi_safis)) {
			       < (int)array_size(afi_safis)) {
				table = zebra_vrf_table(
					afi_safis[iter->afi_safi_ix].afi,
					afi_safis[iter->afi_safi_ix].safi,

@ -2167,7 +2167,7 @@ static unsigned int neigh_hash_keymake(void *p)
		return jhash_1word(ip->ipaddr_v4.s_addr, 0);

	return jhash2(ip->ipaddr_v6.s6_addr32,
		      ZEBRA_NUM_OF(ip->ipaddr_v6.s6_addr32), 0);
		      array_size(ip->ipaddr_v6.s6_addr32), 0);
}

/*
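
The zebra hunks above are a mechanical rename: ZEBRA_NUM_OF and array_size both yield the element count of a fixed-size array, and the commit standardizes on the lib/ name. The underlying idiom, sketched with a hypothetical macro name rather than restating FRR's exact definition:

/* compile-time element count; only valid on true arrays, not on
 * pointers an array has decayed into */
#define array_size_sketch(ar) (sizeof(ar) / sizeof((ar)[0]))

static int slots[7];
/* array_size_sketch(slots) == 7 */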