mirror of
https://git.proxmox.com/git/mirror_frr
synced 2025-06-05 13:08:59 +00:00

* bgpd/(general) refcount struct peer and bgp_info, hence allowing us add work_queues for bgp_process. * bgpd/bgp_route.h: (struct bgp_info) Add 'lock' field for refcount. Add bgp_info_{lock,unlock} helper functions. Add bgp_info_{add,delete} helpers, to remove need for users managing locking/freeing of bgp_info and bgp_node's. * bgpd/bgp_table.h: (struct bgp_node) Add a flags field, and BGP_NODE_PROCESS_SCHEDULED to merge redundant processing of nodes. * bgpd/bgp_fsm.h: Make the ON/OFF/ADD/REMOVE macros lock and unlock peer reference as appropriate. * bgpd/bgp_damp.c: Remove its internal prototypes for bgp_info_delete/free. Just use bgp_info_delete. * bgpd/bgpd.h: (struct bgp_master) Add work_queue pointers. (struct peer) Add reference count 'lock' (peer_lock,peer_unlock) New helpers to take/release reference on struct peer. * bgpd/bgp_advertise.c: (general) Add peer and bgp_info refcounting and balance how references are taken and released. (bgp_advertise_free) release bgp_info reference, if appropriate (bgp_adj_out_free) unlock peer (bgp_advertise_clean) leave the adv references alone, or else call bgp_advertise_free cant unlock them. (bgp_adj_out_set) lock the peer on new adj's, leave the reference alone otherwise. lock the new bgp_info reference. (bgp_adj_in_set) lock the peer reference (bgp_adj_in_remove) and unlock it here (bgp_sync_delete) make hash_free on peer conditional, just in case. * bgpd/bgp_fsm.c: (general) document that the timers depend on bgp_event to release a peer reference. (bgp_fsm_change_status) moved up the file, unchanged. (bgp_stop) Decrement peer lock as many times as cancel_event canceled - shouldnt be needed but just in case. stream_fifo_clean of obuf made conditional, just in case. (bgp_event) always unlock the peer, regardless of return value of bgp_fsm_change_status. * bgpd/bgp_packet.c: (general) change several bgp_stop's to BGP_EVENT's. (bgp_read) Add a mysterious extra peer_unlock for ACCEPT_PEERs along with a comment on it. 
* bgpd/bgp_route.c: (general) Add refcounting of bgp_info, cleanup some of the resource management around bgp_info. Refcount peer. Add workqueues for bgp_process and clear_table. (bgp_info_new) make static (bgp_info_free) Ditto, and unlock the peer reference. (bgp_info_lock,bgp_info_unlock) new exported functions (bgp_info_add) Add a bgp_info to a bgp_node in correct fashion, taking care of reference counts. (bgp_info_delete) do the opposite of bgp_info_add. (bgp_process_rsclient) Converted into a work_queue work function. (bgp_process_main) ditto. (bgp_processq_del) process work queue item deconstructor (bgp_process_queue_init) process work queue init (bgp_process) call init function if required, set up queue item and add to queue, rather than calling process functions directly. (bgp_rib_remove) let bgp_info_delete manage bgp_info refcounts (bgp_rib_withdraw) ditto (bgp_update_rsclient) let bgp_info_add manage refcounts (bgp_update_main) ditto (bgp_clear_route_node) clear_node_queue work function, does per-node aspects of what bgp_clear_route_table did previously (bgp_clear_node_queue_del) clear_node_queue item delete function (bgp_clear_node_complete) clear_node_queue completion function, it unplugs the process queues, which have to be blocked while clear_node_queue is being processed to prevent a race. (bgp_clear_node_queue_init) init function for clear_node_queue work queues (bgp_clear_route_table) Sets up items onto a workqueue now, rather than clearing each node directly. Plugs both process queues to avoid potential race. (bgp_static_withdraw_rsclient) let bgp_info_{add,delete} manage bgp_info refcounts. (bgp_static_update_rsclient) ditto (bgp_static_update_main) ditto (bgp_static_update_vpnv4) ditto, remove unneeded cast. 
(bgp_static_withdraw) see bgp_static_withdraw_rsclient (bgp_static_withdraw_vpnv4) ditto (bgp_aggregate_{route,add,delete}) ditto (bgp_redistribute_{add,delete,withdraw}) ditto * bgpd/bgp_vty.c: (peer_rsclient_set_vty) lock rsclient list peer reference (peer_rsclient_unset_vty) ditto, but unlock same reference * bgpd/bgpd.c: (peer_free) handle frees of info to be kept for lifetime of struct peer. (peer_lock,peer_unlock) peer refcount helpers (peer_new) add initial refcounts (peer_create,peer_create_accept) lock peer as appropriate (peer_delete) unlock as appropriate, move out some free's to peer_free. (peer_group_bind,peer_group_unbind) peer refcounting as appropriate. (bgp_create) check CALLOC return value. (bgp_terminate) free workqueues too. * lib/memtypes.c: Add MTYPE_BGP_PROCESS_QUEUE and MTYPE_BGP_CLEAR_NODE_QUEUE
85 lines
2.2 KiB
C
/* BGP routing table
   Copyright (C) 1998, 2001 Kunihiro Ishiguro

   This file is part of GNU Zebra.

   GNU Zebra is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2, or (at your option) any
   later version.

   GNU Zebra is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU Zebra; see the file COPYING.  If not, write to the Free
   Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.  */
#ifndef _QUAGGA_BGP_TABLE_H
|
|
#define _QUAGGA_BGP_TABLE_H
|
|
|
|
/* Kind of table: the main BGP RIB, or a route-server client's RIB. */
typedef enum
{
  BGP_TABLE_MAIN = 0,     /* global BGP RIB */
  BGP_TABLE_RSCLIENT = 1, /* per-RS-client RIB */
} bgp_table_t;
struct bgp_table
|
|
{
|
|
bgp_table_t type;
|
|
|
|
/* The owner of this 'bgp_table' structure. */
|
|
void *owner;
|
|
|
|
struct bgp_node *top;
|
|
};
|
|
|
|
struct bgp_node
|
|
{
|
|
struct prefix p;
|
|
|
|
struct bgp_table *table;
|
|
struct bgp_node *parent;
|
|
struct bgp_node *link[2];
|
|
#define l_left link[0]
|
|
#define l_right link[1]
|
|
|
|
unsigned int lock;
|
|
|
|
void *info;
|
|
|
|
struct bgp_adj_out *adj_out;
|
|
|
|
struct bgp_adj_in *adj_in;
|
|
|
|
void *aggregate;
|
|
|
|
struct bgp_node *prn;
|
|
|
|
u_char flags;
|
|
#define BGP_NODE_PROCESS_SCHEDULED (1 << 0)
|
|
};
|
|
|
|
/* Table lifecycle. */
struct bgp_table *bgp_table_init (void);
void bgp_table_finish (struct bgp_table *table);

/* Node reference counting and removal. */
struct bgp_node *bgp_lock_node (struct bgp_node *node);
void bgp_unlock_node (struct bgp_node *node);
void bgp_node_delete (struct bgp_node *node);

/* Tree traversal. */
struct bgp_node *bgp_table_top (struct bgp_table *table);
struct bgp_node *bgp_route_next (struct bgp_node *node);
struct bgp_node *bgp_route_next_until (struct bgp_node *node,
                                       struct bgp_node *limit);

/* Prefix lookup. */
struct bgp_node *bgp_node_get (struct bgp_table *table, struct prefix *p);
struct bgp_node *bgp_node_lookup (struct bgp_table *table, struct prefix *p);
struct bgp_node *bgp_node_match (struct bgp_table *table, struct prefix *p);
struct bgp_node *bgp_node_match_ipv4 (struct bgp_table *table,
                                      struct in_addr *addr);
#ifdef HAVE_IPV6
struct bgp_node *bgp_node_match_ipv6 (struct bgp_table *table,
                                      struct in6_addr *addr);
#endif /* HAVE_IPV6 */
#endif /* _QUAGGA_BGP_TABLE_H */
|