From 701a01920eee5431d2052aad92aefbdf50ac2139 Mon Sep 17 00:00:00 2001
From: whitespace
Date: Wed, 7 Oct 2020 17:22:26 -0400
Subject: [PATCH] *: reformat python files

We are now using black.

Signed-off-by: Quentin Young
---
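[Editor's note, placed in the free-form area between the "---" marker and the
diffstat that `git am` discards: the patch itself does not record the exact
formatter invocation, so the commands below are only a sketch of how a
tree-wide reformat like this is typically reproduced, assuming a stock
install of black with default settings rather than the specific version
current in 2020.]

 $ pip install black     # formatter named in the commit message
 $ black .               # run at the repo root; rewrites every *.py in place
 $ git diff --stat       # the result should resemble the diffstat below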
 doc/developer/conf.py | 225 +--
 doc/extra/frrlexer.py | 23 +-
 doc/manpages/conf.py | 251 +--
 doc/user/conf.py | 241 +-
 python/callgraph-dot.py | 193 ++-
 python/clidef.py | 293 ++--
 python/clippy/__init__.py | 29 +-
 python/firstheader.py | 14 +-
 python/makefile.py | 118 +-
 python/makevars.py | 48 +-
 tests/bgpd/test_aspath.py | 13 +-
 tests/bgpd/test_bgp_table.py | 6 +-
 tests/bgpd/test_capability.py | 12 +-
 tests/bgpd/test_ecommunity.py | 14 +-
 tests/bgpd/test_mp_attr.py | 32 +-
 tests/bgpd/test_mpath.py | 5 +-
 tests/bgpd/test_peer_attr.py | 380 ++---
 tests/helpers/python/frrsix.py | 21 +-
 tests/helpers/python/frrtest.py | 100 +-
 tests/isisd/test_fuzz_isis_tlv.py | 18 +-
 tests/isisd/test_isis_lspdb.py | 4 +-
 tests/isisd/test_isis_spf.py | 3 +-
 tests/isisd/test_isis_vertex_queue.py | 4 +-
 tests/lib/cli/test_cli.py | 3 +-
 tests/lib/cli/test_commands.py | 10 +-
 tests/lib/northbound/test_oper_data.py | 3 +-
 tests/lib/test_atomlist.py | 4 +-
 tests/lib/test_graph.py | 3 +-
 tests/lib/test_idalloc.py | 8 +-
 tests/lib/test_nexthop_iter.py | 10 +-
 tests/lib/test_ntop.py | 4 +-
 tests/lib/test_prefix2str.py | 4 +-
 tests/lib/test_printfrr.py | 4 +-
 tests/lib/test_ringbuf.py | 4 +-
 tests/lib/test_srcdest_table.py | 8 +-
 tests/lib/test_stream.py | 3 +-
 tests/lib/test_table.py | 10 +-
 tests/lib/test_timer_correctness.py | 8 +-
 tests/lib/test_ttable.py | 3 +-
 tests/lib/test_typelist.py | 34 +-
 tests/lib/test_versioncmp.py | 4 +-
 tests/lib/test_zlog.py | 3 +-
 tests/lib/test_zmq.py | 11 +-
 tests/ospf6d/test_lsdb.py | 3 +-
 tests/runtests.py | 2 +-
 .../test_all_protocol_startup.py | 767 +++++----
 .../test_bfd_profiles_topo1.py | 24 +-
 tests/topotests/bfd-topo3/test_bfd_topo3.py | 46 +-
 tests/topotests/bgp-auth/test_bgp_auth.py | 4 +-
 .../test_bgp_basic_functionality.py | 6 +-
 .../bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py | 12 +-
 .../bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py | 12 +-
 tests/topotests/bgp-evpn-mh/test_evpn_mh.py | 123 +-
 .../test_bgp-vrf-route-leak-basic.py | 76 +-
 .../test_bgp_aggregate_address_topo1.py | 54 +-
 .../bgp_as_allow_in/test_bgp_as_allow_in.py | 4 +-
 .../test_bgp_communities.py | 4 +-
 .../test_bgp_ebgp_requires_policy.py | 24 +-
 tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py | 214 +--
 .../bgp_features/test_bgp_features.py | 340 ++--
 .../bgp_flowspec/test_bgp_flowspec_topo.py | 1 +
 .../test_bgp_gr_functionality_topo1.py | 4 +-
 .../test_bgp_gr_functionality_topo2.py | 4 +-
 tests/topotests/bgp_gshut/test_bgp_gshut.py | 179 +-
 .../scripts/check_routes.py | 189 ++-
 .../test_bgp_large_community_topo_1.py | 4 +-
 .../test_bgp_large_community_topo_2.py | 4 +-
 .../bgp_link_bw_ip/test_bgp_linkbw_ip.py | 450 ++---
 .../test_bgp_multi_vrf_topo1.py | 10 +-
 .../test_bgp_multi_vrf_topo2.py | 4 +-
 ...test_bgp_recursive_route_ebgp_multi_hop.py | 92 +-
 .../bgp_update_delay/test_bgp_update_delay.py | 58 +-
 .../test_bgp_vrf_dynamic_route_leak_topo1.py | 1449 +++++++++--------
 .../test_bgp_vrf_dynamic_route_leak_topo2.py | 754 +++++----
 .../test_evpn_type5_chaos_topo1.py | 22 +-
 .../test_evpn_type5_topo1.py | 44 +-
 .../isis-sr-topo1/test_isis_sr_topo1.py | 599 ++++---
 .../isis-topo1-vrf/test_isis_topo1_vrf.py | 31 +-
 .../test_ldp_sync_isis_topo1.py | 62 +-
 .../test_ldp_sync_ospf_topo1.py | 30 +-
 .../ldp-vpls-topo1/test_ldp_vpls_topo1.py | 3 +-
 tests/topotests/lib/bgprib.py | 5 +-
 tests/topotests/lib/common_config.py | 125 +-
 tests/topotests/lib/ltemplate.py | 119 +-
 tests/topotests/lib/lutil.py | 265 +--
 tests/topotests/lib/ospf.py | 689 ++++----
 tests/topotests/lib/test/test_json.py | 129 +-
 tests/topotests/lib/topogen.py | 18 +-
 tests/topotests/lib/topojson.py | 72 +-
 tests/topotests/lib/topotest.py | 71 +-
 .../ospf-sr-topo1/test_ospf_sr_topo1.py | 35 +-
 tests/topotests/ospf-topo2/test_ospf_topo2.py | 71 +-
 .../test_ospf_authentication.py | 2 +-
 .../test_ospf_ecmp.py | 2 +-
 .../test_ospf_ecmp_lan.py | 2 +-
 .../ospf_basic_functionality/test_ospf_lan.py | 2 +-
 .../test_ospf_nssa.py | 2 +-
 .../test_ospf_routemaps.py | 2 +-
 .../test_ospf_rte_calc.py | 16 +-
 .../test_ospf_single_area.py | 2 +-
 tests/topotests/pbr-topo1/test_pbr_topo1.py | 18 +-
 tests/topotests/pim-basic/mcast-rx.py | 5 +-
 tests/topotests/pim-basic/mcast-tx.py | 9 +-
 .../topotests/route-scale/test_route_scale.py | 60 +-
 tools/fixup-deprecated.py | 66 +-
 tools/frr-reload.py | 885 ++++++----
 tools/gcc-plugins/format-test.py | 51 +-
 tools/generate_support_bundle.py | 153 +-
 tools/git-reindent-branch.py | 91 +-
 tools/indent.py | 39 +-
 tools/render_md.py | 14 +-
 tools/stringmangle.py | 40 +-
 tools/symalyzer.py | 255 +--
 yang/embedmodel.py | 77 +-
 114 files changed, 6453 insertions(+), 4835 deletions(-)

diff --git a/doc/developer/conf.py b/doc/developer/conf.py
index 9acfab739a..f4bb65ec79 100644
--- a/doc/developer/conf.py
+++ b/doc/developer/conf.py
@@ -21,48 +21,48 @@ from sphinx.highlighting import lexers
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.0'
+needs_sphinx = "1.0"
 
 # prolog for various variable substitutions
-rst_prolog = ''
+rst_prolog = ""
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.graphviz']
+extensions = ["sphinx.ext.todo", "sphinx.ext.graphviz"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'FRR'
-copyright = u'2017, FRR'
-author = u'FRR authors'
+project = u"FRR"
+copyright = u"2017, FRR"
+author = u"FRR authors"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 # The short X.Y version.
-version = u'?.?'
+version = u"?.?"
 # The full version, including alpha/beta/rc tags.
-release = u'?.?-?'
+release = u"?.?-?"
 
 
 # -----------------------------------------------------------------------------
@@ -72,48 +72,49 @@ release = u'?.?-?'
 # Various installation prefixes. Values are extracted from config.status.
 # Reasonable defaults are set in case that file does not exist.
 replace_vars = {
-    'AUTHORS': author,
-    'COPYRIGHT_YEAR': '1999-2005',
-    'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
-    'PACKAGE_NAME': project.lower(),
-    'PACKAGE_TARNAME': project.lower(),
-    'PACKAGE_STRING': project.lower() + ' latest',
-    'PACKAGE_URL': 'https://frrouting.org/',
-    'PACKAGE_VERSION': 'latest',
-    'INSTALL_PREFIX_ETC': '/etc/frr',
-    'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
-    'INSTALL_PREFIX_STATE': '/var/run/frr',
-    'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
-    'INSTALL_USER': 'frr',
-    'INSTALL_GROUP': 'frr',
-    'INSTALL_VTY_GROUP': 'frrvty',
-    'GROUP': 'frr',
-    'USER': 'frr',
+    "AUTHORS": author,
+    "COPYRIGHT_YEAR": "1999-2005",
+    "COPYRIGHT_STR": "Copyright (c) 1999-2005",
+    "PACKAGE_NAME": project.lower(),
+    "PACKAGE_TARNAME": project.lower(),
+    "PACKAGE_STRING": project.lower() + " latest",
+    "PACKAGE_URL": "https://frrouting.org/",
+    "PACKAGE_VERSION": "latest",
+    "INSTALL_PREFIX_ETC": "/etc/frr",
+    "INSTALL_PREFIX_SBIN": "/usr/lib/frr",
+    "INSTALL_PREFIX_STATE": "/var/run/frr",
+    "INSTALL_PREFIX_MODULES": "/usr/lib/frr/modules",
+    "INSTALL_USER": "frr",
+    "INSTALL_GROUP": "frr",
+    "INSTALL_VTY_GROUP": "frrvty",
+    "GROUP": "frr",
+    "USER": "frr",
 }
 
 # extract version information, installation location, other stuff we need to
 # use when building final documents
 val = re.compile('^S\["([^"]+)"\]="(.*)"$')
 
 try:
-    with open('../../config.status', 'r') as cfgstatus:
+    with open("../../config.status", "r") as cfgstatus:
         for ln in cfgstatus.readlines():
             m = val.match(ln)
-            if not m or m.group(1) not in replace_vars.keys(): continue
+            if not m or m.group(1) not in replace_vars.keys():
+                continue
             replace_vars[m.group(1)] = m.group(2)
 except IOError:
     # if config.status doesn't exist, just ignore it
     pass
 
 # manually fill out some of these we can't get from config.status
-replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
-release = replace_vars['PACKAGE_VERSION']
-version = release.split('-')[0]
+replace_vars["COPYRIGHT_STR"] = "Copyright (c)"
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["COPYRIGHT_YEAR"])
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["AUTHORS"])
+release = replace_vars["PACKAGE_VERSION"]
+version = release.split("-")[0]
 
 # add substitutions to prolog
 for key, value in replace_vars.items():
-    rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
+    rst_prolog += ".. |{0}| replace:: {1}\n".format(key, value)
 
 
 # The language for content autogenerated by Sphinx. Refer to documentation
@@ -125,37 +126,42 @@ language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'building-libyang.rst', 'topotests-snippets.rst', 'include-compile.rst']
+exclude_patterns = [
+    "_build",
+    "building-libyang.rst",
+    "topotests-snippets.rst",
+    "include-compile.rst",
+]
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -165,165 +171,158 @@ todo_include_todos = True
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+html_theme = "default"
 
 try:
     import sphinx_rtd_theme
-    html_theme = 'sphinx_rtd_theme'
+
+    html_theme = "sphinx_rtd_theme"
 except ImportError:
     pass
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {
+# html_theme_options = {
 #    'sidebarbgcolor': '#374249'
-#}
+# }
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = '../figures/frr-icon.svg'
+html_logo = "../figures/frr-icon.svg"
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = '../figures/frr-logo-icon.png'
+html_favicon = "../figures/frr-logo-icon.png"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Language to be used for generating the HTML full-text search index.
 # Sphinx supports the following languages:
 #   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
 #   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
 
 # A dictionary with options for the search language support, empty by default.
 # Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
 
 # The name of a javascript file (relative to the configuration directory) that
 # implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'FRRdoc'
+htmlhelp_basename = "FRRdoc"
 
 # -- Options for LaTeX output ---------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-
-# Latex figure (float) alignment
-#'figure_align': 'htbp',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+    # Latex figure (float) alignment
+    #'figure_align': 'htbp',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'FRR.tex', u"FRR Developer's Manual",
-     u'FRR', 'manual'),
+    (master_doc, "FRR.tex", u"FRR Developer's Manual", u"FRR", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-latex_logo = '../figures/frr-logo-medium.png'
+latex_logo = "../figures/frr-logo-medium.png"
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output ---------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'frr', u"FRR Developer's Manual",
-     [author], 1)
-]
+man_pages = [(master_doc, "frr", u"FRR Developer's Manual", [author], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output -------------------------------------------
 
@@ -332,38 +331,44 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'frr', u"FRR Developer's Manual",
-     author, 'FRR', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "frr",
+        u"FRR Developer's Manual",
+        author,
+        "FRR",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
 
 
 # contents of ../extra/frrlexer.py.
 # This is read here to support VPATH build. Since this section is execfile()'d
 # with the file location, we can safely use a relative path here to save the
 # contents of the lexer file for later use even if our relative path changes
 # due to VPATH.
-with open('../extra/frrlexer.py', 'rb') as lex:
+with open("../extra/frrlexer.py", "rb") as lex:
     frrlexerpy = lex.read()
 
 
 # custom extensions here
 def setup(app):
     # object type for FRR CLI commands, can be extended to document parent CLI
     # node later on
-    app.add_object_type('clicmd', 'clicmd')
+    app.add_object_type("clicmd", "clicmd")
     # css overrides for HTML theme
-    app.add_stylesheet('overrides.css')
+    app.add_stylesheet("overrides.css")
     # load Pygments lexer for FRR config syntax
     #
     # NB: in Pygments 2.2+ this can be done with `load_lexer_from_file`, but we
@@ -373,4 +378,4 @@ def setup(app):
     # frrlexer = pygments.lexers.load_lexer_from_file('../extra/frrlexer.py', lexername="FRRLexer")
     custom_namespace = {}
     exec(frrlexerpy, custom_namespace)
-    lexers['frr'] = custom_namespace['FRRLexer']()
+    lexers["frr"] = custom_namespace["FRRLexer"]()
diff --git a/doc/extra/frrlexer.py b/doc/extra/frrlexer.py
index 528bec985b..e177c3983f 100644
--- a/doc/extra/frrlexer.py
+++ b/doc/extra/frrlexer.py
@@ -22,17 +22,18 @@ class FRRLexer(RegexLexer):
     name = "frr"
     aliases = ["frr"]
     tokens = {
-        'root': [
-            (r'^[ \t]*!.*?\n', Comment.Singleline),
+        "root": [
+            (r"^[ \t]*!.*?\n", Comment.Singleline),
             (r'"(\\\\|\\"|[^"])*"', String.Double),
-            (r'[a-f0-9]*:[a-f0-9]*:[a-f0-9:]*(:\d+\.\d+\.\d+\.\d+)?(/\d+)?',
-             Number), # IPv6
-            (r'\d+\.\d+\.\d+\.\d+(/\d+)?', Number), # IPv4
-            (r'^([ \t]*)(no[ \t]+)?([-\w]+)',
-             bygroups(Text, Keyword, Name.Function)),
-            (r'[ \t]+', Text),
-            (r'\n', Text),
-            (r'\d+', Number),
-            (r'\S+', Text),
+            (
+                r"[a-f0-9]*:[a-f0-9]*:[a-f0-9:]*(:\d+\.\d+\.\d+\.\d+)?(/\d+)?",
+                Number,
+            ),  # IPv6
+            (r"\d+\.\d+\.\d+\.\d+(/\d+)?", Number),  # IPv4
+            (r"^([ \t]*)(no[ \t]+)?([-\w]+)", bygroups(Text, Keyword, Name.Function)),
+            (r"[ \t]+", Text),
+            (r"\n", Text),
+            (r"\d+", Number),
+            (r"\S+", Text),
         ],
     }
diff --git a/doc/manpages/conf.py b/doc/manpages/conf.py
index 8b9bb021a3..186f7932b2 100644
--- a/doc/manpages/conf.py
+++ b/doc/manpages/conf.py
@@ -19,48 +19,48 @@ import re
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.0'
+needs_sphinx = "1.0"
 
 # prolog for various variable substitutions
-rst_prolog = ''
+rst_prolog = ""
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.todo']
+extensions = ["sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'FRR'
-copyright = u'2017, FRR'
-author = u'FRR authors'
+project = u"FRR"
+copyright = u"2017, FRR"
+author = u"FRR authors"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 # The short X.Y version.
-version = u'?.?'
+version = u"?.?"
 # The full version, including alpha/beta/rc tags.
-release = u'?.?-?'
+release = u"?.?-?"
 
 
 # -----------------------------------------------------------------------------
@@ -70,48 +70,49 @@ release = u'?.?-?'
 
 # Various installation prefixes. Values are extracted from config.status.
 # Reasonable defaults are set in case that file does not exist.
 replace_vars = {
-    'AUTHORS': author,
-    'COPYRIGHT_YEAR': '1999-2005',
-    'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
-    'PACKAGE_NAME': project.lower(),
-    'PACKAGE_TARNAME': project.lower(),
-    'PACKAGE_STRING': project.lower() + ' latest',
-    'PACKAGE_URL': 'https://frrouting.org/',
-    'PACKAGE_VERSION': 'latest',
-    'INSTALL_PREFIX_ETC': '/etc/frr',
-    'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
-    'INSTALL_PREFIX_STATE': '/var/run/frr',
-    'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
-    'INSTALL_USER': 'frr',
-    'INSTALL_GROUP': 'frr',
-    'INSTALL_VTY_GROUP': 'frrvty',
-    'GROUP': 'frr',
-    'USER': 'frr',
+    "AUTHORS": author,
+    "COPYRIGHT_YEAR": "1999-2005",
+    "COPYRIGHT_STR": "Copyright (c) 1999-2005",
+    "PACKAGE_NAME": project.lower(),
+    "PACKAGE_TARNAME": project.lower(),
+    "PACKAGE_STRING": project.lower() + " latest",
+    "PACKAGE_URL": "https://frrouting.org/",
+    "PACKAGE_VERSION": "latest",
+    "INSTALL_PREFIX_ETC": "/etc/frr",
+    "INSTALL_PREFIX_SBIN": "/usr/lib/frr",
+    "INSTALL_PREFIX_STATE": "/var/run/frr",
+    "INSTALL_PREFIX_MODULES": "/usr/lib/frr/modules",
+    "INSTALL_USER": "frr",
+    "INSTALL_GROUP": "frr",
+    "INSTALL_VTY_GROUP": "frrvty",
+    "GROUP": "frr",
+    "USER": "frr",
 }
 
 # extract version information, installation location, other stuff we need to
 # use when building final documents
 val = re.compile('^S\["([^"]+)"\]="(.*)"$')
 
 try:
-    with open('../../config.status', 'r') as cfgstatus:
+    with open("../../config.status", "r") as cfgstatus:
         for ln in cfgstatus.readlines():
             m = val.match(ln)
-            if not m or m.group(1) not in replace_vars.keys(): continue
+            if not m or m.group(1) not in replace_vars.keys():
+                continue
             replace_vars[m.group(1)] = m.group(2)
 except IOError:
     # if config.status doesn't exist, just ignore it
     pass
 
 # manually fill out some of these we can't get from config.status
-replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
-release = replace_vars['PACKAGE_VERSION']
-version = release.split('-')[0]
+replace_vars["COPYRIGHT_STR"] = "Copyright (c)"
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["COPYRIGHT_YEAR"])
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["AUTHORS"])
+release = replace_vars["PACKAGE_VERSION"]
+version = release.split("-")[0]
 
 # add substitutions to prolog
 for key, value in replace_vars.items():
-    rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
+    rst_prolog += ".. |{0}| replace:: {1}\n".format(key, value)
 
 
 # The language for content autogenerated by Sphinx. Refer to documentation
@@ -123,37 +124,43 @@ language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'common-options.rst', 'epilogue.rst', 'defines.rst', 'bfd-options.rst']
+exclude_patterns = [
+    "_build",
+    "common-options.rst",
+    "epilogue.rst",
+    "defines.rst",
+    "bfd-options.rst",
+]
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -163,31 +170,31 @@ todo_include_todos = True
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+html_theme = "default"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -197,109 +204,105 @@ html_static_path = []
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Language to be used for generating the HTML full-text search index.
 # Sphinx supports the following languages:
 #   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
 #   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
 
 # A dictionary with options for the search language support, empty by default.
 # Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
 
 # The name of a javascript file (relative to the configuration directory) that
 # implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'FRRdoc'
+htmlhelp_basename = "FRRdoc"
 
 # -- Options for LaTeX output ---------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-
-# Latex figure (float) alignment
-#'figure_align': 'htbp',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+    # Latex figure (float) alignment
+    #'figure_align': 'htbp',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'FRR.tex', u'FRR User Manual',
-     u'FRR', 'manual'),
+    (master_doc, "FRR.tex", u"FRR User Manual", u"FRR", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output ---------------------------------------
@@ -308,33 +311,45 @@ latex_documents = [
 # (source start file, name, description, authors, manual section).
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 fwfrr = "{0} routing engine for use with FRRouting."
 
 man_pages = [
-    ('frr-bfdd', 'frr-bfdd', fwfrr.format("a bfd"), [], 8),
-    ('frr-bgpd', 'frr-bgpd', fwfrr.format("a BGPv4, BGPv4+, BGPv4-"), [], 8),
-    ('frr-eigrpd', 'frr-eigrpd', fwfrr.format("an EIGRP"), [], 8),
-    ('frr-fabricd', 'frr-fabricd', fwfrr.format("an OpenFabric"), [], 8),
-    ('frr-isisd', 'frr-isisd', fwfrr.format("an IS-IS"), [], 8),
-    ('frr-ldpd', 'frr-ldpd', fwfrr.format("an LDP"), [], 8),
-    ('frr-nhrpd', 'frr-nhrpd', fwfrr.format("a Next Hop Routing Protocol"), [], 8),
-    ('frr-ospf6d', 'frr-ospf6d', fwfrr.format("an OSPFv3"), [], 8),
-    ('frr-ospfclient', 'frr-ospfclient', 'an example ospf-api client', [], 8),
-    ('frr-ospfd', 'frr-ospfd', fwfrr.format("an OSPFv2"), [], 8),
-    ('frr-pbrd', 'frr-pbrd', fwfrr.format("a PBR"), [], 8),
-    ('frr-pimd', 'frr-pimd', fwfrr.format("a PIM"), [], 8),
-    ('frr-ripd', 'frr-ripd', fwfrr.format("a RIP"), [], 8),
-    ('frr-ripngd', 'frr-ripngd', fwfrr.format("a RIPNG"), [], 8),
-    ('frr-sharpd', 'frr-sharpd', fwfrr.format("a SHARP"), [], 8),
-    ('frr-staticd', 'frr-staticd', fwfrr.format("a static route manager"), [], 8),
-    ('frr-vrrpd', 'frr-vrrpd', fwfrr.format("a VRRP"), [], 8),
-    ('frr-watchfrr', 'frr-watchfrr', 'a program to monitor the status of FRRouting daemons', [], 8),
-    ('frr-zebra', 'frr-zebra', 'a routing manager for use with associated FRRouting components.', [], 8),
-    ('frr', 'frr', 'a systemd interaction script', [], 1),
-    ('mtracebis', 'mtracebis', "a multicast trace client", [], 8),
-    ('vtysh', 'vtysh', 'an integrated shell for FRRouting.', [], 1),
+    ("frr-bfdd", "frr-bfdd", fwfrr.format("a bfd"), [], 8),
+    ("frr-bgpd", "frr-bgpd", fwfrr.format("a BGPv4, BGPv4+, BGPv4-"), [], 8),
+    ("frr-eigrpd", "frr-eigrpd", fwfrr.format("an EIGRP"), [], 8),
+    ("frr-fabricd", "frr-fabricd", fwfrr.format("an OpenFabric"), [], 8),
+    ("frr-isisd", "frr-isisd", fwfrr.format("an IS-IS"), [], 8),
+    ("frr-ldpd", "frr-ldpd", fwfrr.format("an LDP"), [], 8),
+    ("frr-nhrpd", "frr-nhrpd", fwfrr.format("a Next Hop Routing Protocol"), [], 8),
+    ("frr-ospf6d", "frr-ospf6d", fwfrr.format("an OSPFv3"), [], 8),
+    ("frr-ospfclient", "frr-ospfclient", "an example ospf-api client", [], 8),
+    ("frr-ospfd", "frr-ospfd", fwfrr.format("an OSPFv2"), [], 8),
+    ("frr-pbrd", "frr-pbrd", fwfrr.format("a PBR"), [], 8),
+    ("frr-pimd", "frr-pimd", fwfrr.format("a PIM"), [], 8),
+    ("frr-ripd", "frr-ripd", fwfrr.format("a RIP"), [], 8),
+    ("frr-ripngd", "frr-ripngd", fwfrr.format("a RIPNG"), [], 8),
+    ("frr-sharpd", "frr-sharpd", fwfrr.format("a SHARP"), [], 8),
+    ("frr-staticd", "frr-staticd", fwfrr.format("a static route manager"), [], 8),
+    ("frr-vrrpd", "frr-vrrpd", fwfrr.format("a VRRP"), [], 8),
+    (
+        "frr-watchfrr",
+        "frr-watchfrr",
+        "a program to monitor the status of FRRouting daemons",
+        [],
+        8,
+    ),
+    (
+        "frr-zebra",
+        "frr-zebra",
+        "a routing manager for use with associated FRRouting components.",
+        [],
+        8,
+    ),
+    ("frr", "frr", "a systemd interaction script", [], 1),
+    ("mtracebis", "mtracebis", "a multicast trace client", [], 8),
+    ("vtysh", "vtysh", "an integrated shell for FRRouting.", [], 1),
 ]
 
 
 # -- Options for Texinfo output -------------------------------------------
@@ -344,15 +359,15 @@ man_pages = [
 #  dir menu entry, description, category)
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
 
 
 # custom extensions here
diff --git a/doc/user/conf.py b/doc/user/conf.py
index 1f6f050bcf..79b37e7850 100644
--- a/doc/user/conf.py
+++ b/doc/user/conf.py
@@ -22,48 +22,48 @@ from sphinx.highlighting import lexers
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.0'
+needs_sphinx = "1.0"
 
 # prolog for various variable substitutions
-rst_prolog = ''
+rst_prolog = ""
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.todo']
+extensions = ["sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'FRR'
-copyright = u'2017, FRR'
-author = u'FRR authors'
+project = u"FRR"
+copyright = u"2017, FRR"
+author = u"FRR authors"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 # The short X.Y version.
-version = u'?.?'
+version = u"?.?"
 # The full version, including alpha/beta/rc tags.
-release = u'?.?-?'
+release = u"?.?-?"
 
 
 # -----------------------------------------------------------------------------
@@ -73,48 +73,49 @@ release = u'?.?-?'
 
 # Various installation prefixes. Values are extracted from config.status.
 # Reasonable defaults are set in case that file does not exist.
 replace_vars = {
-    'AUTHORS': author,
-    'COPYRIGHT_YEAR': '1999-2005',
-    'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
-    'PACKAGE_NAME': project.lower(),
-    'PACKAGE_TARNAME': project.lower(),
-    'PACKAGE_STRING': project.lower() + ' latest',
-    'PACKAGE_URL': 'https://frrouting.org/',
-    'PACKAGE_VERSION': 'latest',
-    'INSTALL_PREFIX_ETC': '/etc/frr',
-    'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
-    'INSTALL_PREFIX_STATE': '/var/run/frr',
-    'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
-    'INSTALL_USER': 'frr',
-    'INSTALL_GROUP': 'frr',
-    'INSTALL_VTY_GROUP': 'frrvty',
-    'GROUP': 'frr',
-    'USER': 'frr',
+    "AUTHORS": author,
+    "COPYRIGHT_YEAR": "1999-2005",
+    "COPYRIGHT_STR": "Copyright (c) 1999-2005",
+    "PACKAGE_NAME": project.lower(),
+    "PACKAGE_TARNAME": project.lower(),
+    "PACKAGE_STRING": project.lower() + " latest",
+    "PACKAGE_URL": "https://frrouting.org/",
+    "PACKAGE_VERSION": "latest",
+    "INSTALL_PREFIX_ETC": "/etc/frr",
+    "INSTALL_PREFIX_SBIN": "/usr/lib/frr",
+    "INSTALL_PREFIX_STATE": "/var/run/frr",
+    "INSTALL_PREFIX_MODULES": "/usr/lib/frr/modules",
+    "INSTALL_USER": "frr",
+    "INSTALL_GROUP": "frr",
+    "INSTALL_VTY_GROUP": "frrvty",
+    "GROUP": "frr",
+    "USER": "frr",
 }
 
 # extract version information, installation location, other stuff we need to
 # use when building final documents
 val = re.compile('^S\["([^"]+)"\]="(.*)"$')
 
 try:
-    with open('../../config.status', 'r') as cfgstatus:
+    with open("../../config.status", "r") as cfgstatus:
         for ln in cfgstatus.readlines():
             m = val.match(ln)
-            if not m or m.group(1) not in replace_vars.keys(): continue
+            if not m or m.group(1) not in replace_vars.keys():
+                continue
             replace_vars[m.group(1)] = m.group(2)
 except IOError:
     # if config.status doesn't exist, just ignore it
     pass
 
 # manually fill out some of these we can't get from config.status
-replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
-release = replace_vars['PACKAGE_VERSION']
-version = release.split('-')[0]
+replace_vars["COPYRIGHT_STR"] = "Copyright (c)"
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["COPYRIGHT_YEAR"])
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["AUTHORS"])
+release = replace_vars["PACKAGE_VERSION"]
+version = release.split("-")[0]
 
 # add substitutions to prolog
 for key, value in replace_vars.items():
-    rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
+    rst_prolog += ".. |{0}| replace:: {1}\n".format(key, value)
 
 
 # The language for content autogenerated by Sphinx. Refer to documentation
@@ -126,39 +127,45 @@ language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'rpki.rst', 'routeserver.rst',
-                    'ospf_fundamentals.rst', 'flowspec.rst', 'snmptrap.rst',
-                    'wecmp_linkbw.rst']
+exclude_patterns = [
+    "_build",
+    "rpki.rst",
+    "routeserver.rst",
+    "ospf_fundamentals.rst",
+    "flowspec.rst",
+    "snmptrap.rst",
+    "wecmp_linkbw.rst",
+]
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -168,165 +175,158 @@ todo_include_todos = True
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+html_theme = "default"
 
 try:
     import sphinx_rtd_theme
-    html_theme = 'sphinx_rtd_theme'
+
+    html_theme = "sphinx_rtd_theme"
 except ImportError:
     pass
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {
+# html_theme_options = {
 #    'sidebarbgcolor': '#374249'
-#}
+# }
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = '../figures/frr-icon.svg'
+html_logo = "../figures/frr-icon.svg"
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = '../figures/frr-logo-icon.png'
+html_favicon = "../figures/frr-logo-icon.png"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Language to be used for generating the HTML full-text search index.
 # Sphinx supports the following languages:
 #   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
 #   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
 
 # A dictionary with options for the search language support, empty by default.
 # Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
 
 # The name of a javascript file (relative to the configuration directory) that
 # implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'FRRdoc'
+htmlhelp_basename = "FRRdoc"
 
 # -- Options for LaTeX output ---------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-
-# Latex figure (float) alignment
-#'figure_align': 'htbp',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+    # Latex figure (float) alignment
+    #'figure_align': 'htbp',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'FRR.tex', u'FRR User Manual',
-     u'FRR', 'manual'),
+    (master_doc, "FRR.tex", u"FRR User Manual", u"FRR", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-latex_logo = '../figures/frr-logo-medium.png'
+latex_logo = "../figures/frr-logo-medium.png"
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output ---------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'frr', u'FRR User Manual',
-     [author], 1)
-]
+man_pages = [(master_doc, "frr", u"FRR User Manual", [author], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output -------------------------------------------
 
@@ -335,29 +335,35 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'frr', u'FRR User Manual',
-     author, 'FRR', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "frr",
+        u"FRR User Manual",
+        author,
+        "FRR",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
 
 
 # contents of ../extra/frrlexer.py.
 # This is read here to support VPATH build. Since this section is execfile()'d
 # with the file location, we can safely use a relative path here to save the
 # contents of the lexer file for later use even if our relative path changes
 # due to VPATH.
-with open('../extra/frrlexer.py', 'rb') as lex:
+with open("../extra/frrlexer.py", "rb") as lex:
     frrlexerpy = lex.read()
 
 # Parse version string into int array
@@ -365,7 +371,7 @@ def vparse(s):
 
     a = []
     for c in s:
-        if c != '.':
+        if c != ".":
            a.append(int(c))
 
     while len(a) < 3:
@@ -373,22 +379,23 @@ def vparse(s):
 
     return a[:3]
 
+
 # custom extensions here
 def setup(app):
     # object type for FRR CLI commands, can be extended to document parent CLI
     # node later on
-    app.add_object_type('clicmd', 'clicmd')
+    app.add_object_type("clicmd", "clicmd")
 
     # css overrides for HTML theme
 
     # Note sphinx version differences
     sver = vparse(sphinx.__version__)
 
-    if sver < vparse('1.8.0') :
-        app.add_stylesheet('overrides.css')
-        app.add_javascript('overrides.js')
+    if sver < vparse("1.8.0"):
+        app.add_stylesheet("overrides.css")
+        app.add_javascript("overrides.js")
     else:
-        app.add_css_file('overrides.css')
-        app.add_js_file('overrides.js')
+        app.add_css_file("overrides.css")
+        app.add_js_file("overrides.js")
 
     # load Pygments lexer for FRR config syntax
     #
@@ -399,4 +406,4 @@ def setup(app):
     # frrlexer = pygments.lexers.load_lexer_from_file('../extra/frrlexer.py', lexername="FRRLexer")
     custom_namespace = {}
     exec(frrlexerpy, custom_namespace)
-    lexers['frr'] = custom_namespace['FRRLexer']()
+    lexers["frr"] = custom_namespace["FRRLexer"]()
diff --git a/python/callgraph-dot.py b/python/callgraph-dot.py
index 4faf1dae16..f80766a080 100644
--- a/python/callgraph-dot.py
+++ b/python/callgraph-dot.py
@@ -20,6 +20,7 @@ import re
 import sys
 import json
 
+
 class FunctionNode(object):
     funcs = {}
 
@@ -39,7 +40,7 @@ class FunctionNode(object):
 
     def define(self, attrs):
         self.defined = True
-        self.defs.append((attrs['filename'], attrs['line']))
+        self.defs.append((attrs["filename"], attrs["line"]))
         return self
 
     def add_call(self, called, attrs):
@@ -63,11 +64,12 @@ class FunctionNode(object):
             return cls.funcs[name]
         return FunctionNode(name)
 
+
 class CallEdge(object):
     def __init__(self, i, o, attrs):
         self.i = i
         self.o = o
-        self.is_external = attrs['is_external']
+        self.is_external = attrs["is_external"]
         self.attrs = attrs
 
         i.out.append(self)
@@ -76,11 +78,13 @@ class CallEdge(object):
     def __repr__(self):
         return '<"%s()" -> "%s()">' % (self.i.name, self.o.name)
 
+
 def nameclean(n):
-    if '.' in n:
-        return n.split('.', 1)[0]
+    if "." in n:
+        return n.split(".", 1)[0]
     return n
 
+
 def calc_rank(queue, direction):
     nextq = queue
 
@@ -98,7 +102,7 @@ def calc_rank(queue, direction):
         queue = nextq
         nextq = []
 
-        #sys.stderr.write('rank %d\n' % currank)
+        # sys.stderr.write('rank %d\n' % currank)
 
         cont = False
 
@@ -123,6 +127,7 @@ def calc_rank(queue, direction):
 
     return nextq
 
+
 class Graph(dict):
     class Subgraph(set):
         def __init__(self):
@@ -166,6 +171,7 @@ class Graph(dict):
 
         def calls(self):
             return self._calls
+
         def calld(self):
             return self._calld
 
@@ -245,7 +251,7 @@ class Graph(dict):
                 else:
                     evalset.add(evnode)
 
-        #if len(candidates) > 1:
+        # if len(candidates) > 1:
         #    for candidate in candidates:
         #        if candidate != node:
         #            #node.merge(candidate)
@@ -266,7 +272,7 @@ class Graph(dict):
         self._linear_nodes = []
 
         while len(nodes):
-            sys.stderr.write('%d\n' % len(nodes))
+            sys.stderr.write("%d\n" % len(nodes))
             node = nodes.pop(0)
 
             down[node] = set()
@@ -304,106 +310,90 @@ class Graph(dict):
 
         return self._subgraphs, self._linear_nodes
 
-with open(sys.argv[1], 'r') as fd:
+
+with open(sys.argv[1], "r") as fd:
     data = json.load(fd)
 
 extra_info = {
     # zebra - LSP WQ
-    ('lsp_processq_add', 'work_queue_add'): [
-        'lsp_process',
-        'lsp_processq_del',
-        'lsp_processq_complete',
+    ("lsp_processq_add", "work_queue_add"): [
+        "lsp_process",
+        "lsp_processq_del",
+        "lsp_processq_complete",
     ],
     # zebra - main WQ
-    ('mq_add_handler', 'work_queue_add'): [
-        'meta_queue_process',
-    ],
-    ('meta_queue_process', 'work_queue_add'): [
-        'meta_queue_process',
-    ],
+    ("mq_add_handler", "work_queue_add"): ["meta_queue_process",],
+    ("meta_queue_process", "work_queue_add"): ["meta_queue_process",],
    # bgpd - label pool WQ
-    ('bgp_lp_get', 'work_queue_add'): [
-        'lp_cbq_docallback',
-    ],
-    ('bgp_lp_event_chunk', 'work_queue_add'): [
-        'lp_cbq_docallback',
-    ],
-    ('bgp_lp_event_zebra_up', 'work_queue_add'): [
-        'lp_cbq_docallback',
-    ],
+    ("bgp_lp_get", "work_queue_add"): ["lp_cbq_docallback",],
+    ("bgp_lp_event_chunk", "work_queue_add"): ["lp_cbq_docallback",],
+    ("bgp_lp_event_zebra_up", "work_queue_add"): ["lp_cbq_docallback",],
     # bgpd - main WQ
-    ('bgp_process', 'work_queue_add'): [
-        'bgp_process_wq',
-        'bgp_processq_del',
-    ],
-    ('bgp_add_eoiu_mark', 'work_queue_add'): [
-        'bgp_process_wq',
-        'bgp_processq_del',
-    ],
+    ("bgp_process", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",],
+    ("bgp_add_eoiu_mark", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",],
     # clear node WQ
-    ('bgp_clear_route_table', 'work_queue_add'): [
-        'bgp_clear_route_node',
-        'bgp_clear_node_queue_del',
-        'bgp_clear_node_complete',
+    ("bgp_clear_route_table", "work_queue_add"): [
+        "bgp_clear_route_node",
+        "bgp_clear_node_queue_del",
+        "bgp_clear_node_complete",
     ],
     # rfapi WQs
-    ('rfapi_close', 'work_queue_add'): [
-        'rfapi_deferred_close_workfunc',
-    ],
-    ('rfapiRibUpdatePendingNode', 'work_queue_add'): [
-        'rfapiRibDoQueuedCallback',
-        'rfapiRibQueueItemDelete',
+    ("rfapi_close", "work_queue_add"): ["rfapi_deferred_close_workfunc",],
+    ("rfapiRibUpdatePendingNode", "work_queue_add"): [
+ "rfapiRibDoQueuedCallback", + "rfapiRibQueueItemDelete", ], } -for func, fdata in data['functions'].items(): +for func, fdata in data["functions"].items(): func = nameclean(func) fnode = FunctionNode.get(func).define(fdata) - for call in fdata['calls']: - if call.get('type') in [None, 'unnamed', 'thread_sched']: - if call.get('target') is None: + for call in fdata["calls"]: + if call.get("type") in [None, "unnamed", "thread_sched"]: + if call.get("target") is None: continue - tgt = nameclean(call['target']) + tgt = nameclean(call["target"]) fnode.add_call(FunctionNode.get(tgt), call) - for fptr in call.get('funcptrs', []): + for fptr in call.get("funcptrs", []): fnode.add_call(FunctionNode.get(nameclean(fptr)), call) - if tgt == 'work_queue_add': + if tgt == "work_queue_add": if (func, tgt) not in extra_info: - sys.stderr.write('%s:%d:%s(): work_queue_add() not handled\n' % ( - call['filename'], call['line'], func)) + sys.stderr.write( + "%s:%d:%s(): work_queue_add() not handled\n" + % (call["filename"], call["line"], func) + ) else: attrs = dict(call) - attrs.update({'is_external': False, 'type': 'workqueue'}) + attrs.update({"is_external": False, "type": "workqueue"}) for dst in extra_info[func, tgt]: fnode.add_call(FunctionNode.get(dst), call) - elif call['type'] == 'install_element': - vty_node = FunctionNode.get('VTY_NODE_%d' % call['vty_node']) - vty_node.add_call(FunctionNode.get(nameclean(call['target'])), call) - elif call['type'] == 'hook': + elif call["type"] == "install_element": + vty_node = FunctionNode.get("VTY_NODE_%d" % call["vty_node"]) + vty_node.add_call(FunctionNode.get(nameclean(call["target"])), call) + elif call["type"] == "hook": # TODO: edges for hooks from data['hooks'] pass n = FunctionNode.funcs # fix some very low end functions cycling back very far to the top -if 'peer_free' in n: - n['peer_free'].unlink(n['bgp_timer_set']) - n['peer_free'].unlink(n['bgp_addpath_set_peer_type']) -if 'bgp_path_info_extra_free' in n: - n['bgp_path_info_extra_free'].rank = 0 +if "peer_free" in n: + n["peer_free"].unlink(n["bgp_timer_set"]) + n["peer_free"].unlink(n["bgp_addpath_set_peer_type"]) +if "bgp_path_info_extra_free" in n: + n["bgp_path_info_extra_free"].rank = 0 -if 'zlog_ref' in n: - n['zlog_ref'].rank = 0 -if 'mt_checkalloc' in n: - n['mt_checkalloc'].rank = 0 +if "zlog_ref" in n: + n["zlog_ref"].rank = 0 +if "mt_checkalloc" in n: + n["mt_checkalloc"].rank = 0 queue = list(FunctionNode.funcs.values()) queue = calc_rank(queue, 1) queue = calc_rank(queue, -1) -sys.stderr.write('%d functions in cyclic set\n' % len(queue)) +sys.stderr.write("%d functions in cyclic set\n" % len(queue)) graph = Graph(queue) graph.automerge() @@ -411,10 +401,12 @@ graph.automerge() gv_nodes = [] gv_edges = [] -sys.stderr.write('%d groups after automerge\n' % len(graph._groups)) +sys.stderr.write("%d groups after automerge\n" % len(graph._groups)) + def is_vnc(n): - return n.startswith('rfapi') or n.startswith('vnc') or ('_vnc_' in n) + return n.startswith("rfapi") or n.startswith("vnc") or ("_vnc_" in n) + _vncstyle = ',fillcolor="#ffffcc",style=filled' cyclic_set_names = set([fn.name for fn in graph.values()]) @@ -422,55 +414,76 @@ cyclic_set_names = set([fn.name for fn in graph.values()]) for i, group in enumerate(graph._groups): if len(group) > 1: group.num = i - gv_nodes.append('\tsubgraph cluster_%d {' % i) - gv_nodes.append('\t\tcolor=blue;') + gv_nodes.append("\tsubgraph cluster_%d {" % i) + gv_nodes.append("\t\tcolor=blue;") for gn in group: has_cycle_callers = set(gn.calld()) - 
group
-            has_ext_callers = set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
+            has_ext_callers = (
+                set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
+            )
 
-            style = ''
-            etext = ''
+            style = ""
+            etext = ""
             if is_vnc(gn.name):
                 style += _vncstyle
             if has_cycle_callers:
-                style += ',color=blue,penwidth=3'
+                style += ",color=blue,penwidth=3"
             if has_ext_callers:
                 style += ',fillcolor="#ffeebb",style=filled'
-                etext += '<br/><font point-size="10">(%d other callers)</font>' % (len(has_ext_callers))
+                etext += '<br/><font point-size="10">(%d other callers)</font>' % (
+                    len(has_ext_callers)
+                )
 
-            gv_nodes.append('\t\t"%s" [shape=box,label=<%s%s>%s];' % (gn.name, '<br/>'.join([fn.name for fn in gn._fns]), etext, style))
-        gv_nodes.append('\t}')
+            gv_nodes.append(
+                '\t\t"%s" [shape=box,label=<%s%s>%s];'
+                % (gn.name, "<br/>".join([fn.name for fn in gn._fns]), etext, style)
+            )
+        gv_nodes.append("\t}")
     else:
         for gn in group:
-            has_ext_callers = set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
+            has_ext_callers = (
+                set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
+            )
 
-            style = ''
-            etext = ''
+            style = ""
+            etext = ""
             if is_vnc(gn.name):
                 style += _vncstyle
             if has_ext_callers:
                 style += ',fillcolor="#ffeebb",style=filled'
-                etext += '<br/><font point-size="10">(%d other callers)</font>' % (len(has_ext_callers))
-            gv_nodes.append('\t"%s" [shape=box,label=<%s%s>%s];' % (gn.name, '<br/>'.join([fn.name for fn in gn._fns]), etext, style))
+                etext += '<br/><font point-size="10">(%d other callers)</font>' % (
+                    len(has_ext_callers)
+                )
+            gv_nodes.append(
+                '\t"%s" [shape=box,label=<%s%s>%s];'
+                % (gn.name, "<br/>".join([fn.name for fn in gn._fns]), etext, style)
+            )
 
 edges = set()
 for gn in graph.values():
     for calls in gn.calls():
         if gn._group == calls._group:
-            gv_edges.append('\t"%s" -> "%s" [color="#55aa55",style=dashed];' % (gn.name, calls.name))
+            gv_edges.append(
+                '\t"%s" -> "%s" [color="#55aa55",style=dashed];' % (gn.name, calls.name)
+            )
         else:
+
             def xname(nn):
                 if len(nn._group) > 1:
-                    return 'cluster_%d' % nn._group.num
+                    return "cluster_%d" % nn._group.num
                 else:
                     return nn.name
+
             tup = xname(gn), calls.name
             if tup[0] != tup[1] and tup not in edges:
                 gv_edges.append('\t"%s" -> "%s" [weight=0.0,w=0.0,color=blue];' % tup)
                 edges.add(tup)
 
-with open(sys.argv[2], 'w') as fd:
-    fd.write('''digraph {
+with open(sys.argv[2], "w") as fd:
+    fd.write(
+        """digraph {
 	node [fontsize=13,fontname="Fira Sans"];
 %s
-}''' % '\n'.join(gv_nodes + [''] + gv_edges))
+}"""
+        % "\n".join(gv_nodes + [""] + gv_edges)
+    )
diff --git a/python/clidef.py b/python/clidef.py
index baa6ed52b2..a47cee2d6b 100644
--- a/python/clidef.py
+++ b/python/clidef.py
@@ -26,39 +26,49 @@ from io import StringIO
 # the various handlers generate output C code for a particular type of
 # CLI token, choosing the most useful output C type.
 
+
 class RenderHandler(object):
     def __init__(self, token):
         pass
+
     def combine(self, other):
         if type(self) == type(other):
             return other
         return StringHandler(None)
 
-    deref = ''
+    deref = ""
     drop_str = False
     canfail = True
     canassert = False
 
+
 class StringHandler(RenderHandler):
-    argtype = 'const char *'
-    decl = Template('const char *$varname = NULL;')
-    code = Template('$varname = (argv[_i]->type == WORD_TKN) ? argv[_i]->text : argv[_i]->arg;')
+    argtype = "const char *"
+    decl = Template("const char *$varname = NULL;")
+    code = Template(
+        "$varname = (argv[_i]->type == WORD_TKN) ? 
argv[_i]->text : argv[_i]->arg;" + ) drop_str = True canfail = False canassert = True + class LongHandler(RenderHandler): - argtype = 'long' - decl = Template('long $varname = 0;') - code = Template('''\ + argtype = "long" + decl = Template("long $varname = 0;") + code = Template( + """\ char *_end; $varname = strtol(argv[_i]->arg, &_end, 10); -_fail = (_end == argv[_i]->arg) || (*_end != '\\0');''') +_fail = (_end == argv[_i]->arg) || (*_end != '\\0');""" + ) + # A.B.C.D/M (prefix_ipv4) and # X:X::X:X/M (prefix_ipv6) are "compatible" and can merge into a # struct prefix: + class PrefixBase(RenderHandler): def combine(self, other): if type(self) == type(other): @@ -66,23 +76,33 @@ class PrefixBase(RenderHandler): if isinstance(other, PrefixBase): return PrefixGenHandler(None) return StringHandler(None) - deref = '&' + + deref = "&" + + class Prefix4Handler(PrefixBase): - argtype = 'const struct prefix_ipv4 *' - decl = Template('struct prefix_ipv4 $varname = { };') - code = Template('_fail = !str2prefix_ipv4(argv[_i]->arg, &$varname);') + argtype = "const struct prefix_ipv4 *" + decl = Template("struct prefix_ipv4 $varname = { };") + code = Template("_fail = !str2prefix_ipv4(argv[_i]->arg, &$varname);") + + class Prefix6Handler(PrefixBase): - argtype = 'const struct prefix_ipv6 *' - decl = Template('struct prefix_ipv6 $varname = { };') - code = Template('_fail = !str2prefix_ipv6(argv[_i]->arg, &$varname);') + argtype = "const struct prefix_ipv6 *" + decl = Template("struct prefix_ipv6 $varname = { };") + code = Template("_fail = !str2prefix_ipv6(argv[_i]->arg, &$varname);") + + class PrefixEthHandler(PrefixBase): - argtype = 'struct prefix_eth *' - decl = Template('struct prefix_eth $varname = { };') - code = Template('_fail = !str2prefix_eth(argv[_i]->arg, &$varname);') + argtype = "struct prefix_eth *" + decl = Template("struct prefix_eth $varname = { };") + code = Template("_fail = !str2prefix_eth(argv[_i]->arg, &$varname);") + + class PrefixGenHandler(PrefixBase): - argtype = 'const struct prefix *' - decl = Template('struct prefix $varname = { };') - code = Template('_fail = !str2prefix(argv[_i]->arg, &$varname);') + argtype = "const struct prefix *" + decl = Template("struct prefix $varname = { };") + code = Template("_fail = !str2prefix(argv[_i]->arg, &$varname);") + # same for IP addresses. result is union sockunion. 
class IPBase(RenderHandler): @@ -92,18 +112,27 @@ class IPBase(RenderHandler): if type(other) in [IP4Handler, IP6Handler, IPGenHandler]: return IPGenHandler(None) return StringHandler(None) + + class IP4Handler(IPBase): - argtype = 'struct in_addr' - decl = Template('struct in_addr $varname = { INADDR_ANY };') - code = Template('_fail = !inet_aton(argv[_i]->arg, &$varname);') + argtype = "struct in_addr" + decl = Template("struct in_addr $varname = { INADDR_ANY };") + code = Template("_fail = !inet_aton(argv[_i]->arg, &$varname);") + + class IP6Handler(IPBase): - argtype = 'struct in6_addr' - decl = Template('struct in6_addr $varname = {};') - code = Template('_fail = !inet_pton(AF_INET6, argv[_i]->arg, &$varname);') + argtype = "struct in6_addr" + decl = Template("struct in6_addr $varname = {};") + code = Template("_fail = !inet_pton(AF_INET6, argv[_i]->arg, &$varname);") + + class IPGenHandler(IPBase): - argtype = 'const union sockunion *' - decl = Template('''union sockunion s__$varname = { .sa.sa_family = AF_UNSPEC }, *$varname = NULL;''') - code = Template('''\ + argtype = "const union sockunion *" + decl = Template( + """union sockunion s__$varname = { .sa.sa_family = AF_UNSPEC }, *$varname = NULL;""" + ) + code = Template( + """\ if (argv[_i]->text[0] == 'X') { s__$varname.sa.sa_family = AF_INET6; _fail = !inet_pton(AF_INET6, argv[_i]->arg, &s__$varname.sin6.sin6_addr); @@ -112,26 +141,30 @@ if (argv[_i]->text[0] == 'X') { s__$varname.sa.sa_family = AF_INET; _fail = !inet_aton(argv[_i]->arg, &s__$varname.sin.sin_addr); $varname = &s__$varname; -}''') +}""" + ) canassert = True + def mix_handlers(handlers): def combine(a, b): if a is None: return b return a.combine(b) + return reduce(combine, handlers, None) + handlers = { - 'WORD_TKN': StringHandler, - 'VARIABLE_TKN': StringHandler, - 'RANGE_TKN': LongHandler, - 'IPV4_TKN': IP4Handler, - 'IPV4_PREFIX_TKN': Prefix4Handler, - 'IPV6_TKN': IP6Handler, - 'IPV6_PREFIX_TKN': Prefix6Handler, - 'MAC_TKN': PrefixEthHandler, - 'MAC_PREFIX_TKN': PrefixEthHandler, + "WORD_TKN": StringHandler, + "VARIABLE_TKN": StringHandler, + "RANGE_TKN": LongHandler, + "IPV4_TKN": IP4Handler, + "IPV4_PREFIX_TKN": Prefix4Handler, + "IPV6_TKN": IP6Handler, + "IPV6_PREFIX_TKN": Prefix6Handler, + "MAC_TKN": PrefixEthHandler, + "MAC_PREFIX_TKN": PrefixEthHandler, } # core template invoked for each occurence of DEFPY. @@ -139,7 +172,8 @@ handlers = { # the "#if $..." bits are there to keep this template unified into one # common form, without requiring a more advanced template engine (e.g. 
# jinja2) -templ = Template('''/* $fnname => "$cmddef" */ +templ = Template( + """/* $fnname => "$cmddef" */ DEFUN_CMD_FUNC_DECL($fnname) #define funcdecl_$fnname static int ${fnname}_magic(\\ const struct cmd_element *self __attribute__ ((unused)),\\ @@ -178,18 +212,22 @@ $argassert return ${fnname}_magic(self, vty, argc, argv$arglist); } -''') +""" +) # invoked for each named parameter -argblock = Template(''' +argblock = Template( + """ if (!strcmp(argv[_i]->varname, \"$varname\")) {$strblock $code - }''') + }""" +) -def get_always_args(token, always_args, args = [], stack = []): + +def get_always_args(token, always_args, args=[], stack=[]): if token in stack: return - if token.type == 'END_TKN': + if token.type == "END_TKN": for arg in list(always_args): if arg not in args: always_args.remove(arg) @@ -201,38 +239,45 @@ def get_always_args(token, always_args, args = [], stack = []): for nexttkn in token.next(): get_always_args(nexttkn, always_args, args, stack) + class Macros(dict): def load(self, filename): filedata = clippy.parse(filename) - for entry in filedata['data']: - if entry['type'] != 'PREPROC': + for entry in filedata["data"]: + if entry["type"] != "PREPROC": continue - ppdir = entry['line'].lstrip().split(None, 1) - if ppdir[0] != 'define' or len(ppdir) != 2: + ppdir = entry["line"].lstrip().split(None, 1) + if ppdir[0] != "define" or len(ppdir) != 2: continue ppdef = ppdir[1].split(None, 1) name = ppdef[0] - if '(' in name: + if "(" in name: continue - val = ppdef[1] if len(ppdef) == 2 else '' + val = ppdef[1] if len(ppdef) == 2 else "" - val = val.strip(' \t\n\\') + val = val.strip(" \t\n\\") if name in self: - sys.stderr.write('warning: macro %s redefined!\n' % (name)) + sys.stderr.write("warning: macro %s redefined!\n" % (name)) self[name] = val + def process_file(fn, ofd, dumpfd, all_defun, macros): errors = 0 filedata = clippy.parse(fn) - for entry in filedata['data']: - if entry['type'].startswith('DEFPY') or (all_defun and entry['type'].startswith('DEFUN')): - if len(entry['args'][0]) != 1: - sys.stderr.write('%s:%d: DEFPY function name not parseable (%r)\n' % (fn, entry['lineno'], entry['args'][0])) + for entry in filedata["data"]: + if entry["type"].startswith("DEFPY") or ( + all_defun and entry["type"].startswith("DEFUN") + ): + if len(entry["args"][0]) != 1: + sys.stderr.write( + "%s:%d: DEFPY function name not parseable (%r)\n" + % (fn, entry["lineno"], entry["args"][0]) + ) errors += 1 continue - cmddef = entry['args'][2] + cmddef = entry["args"][2] cmddefx = [] for i in cmddef: while i in macros: @@ -241,13 +286,16 @@ def process_file(fn, ofd, dumpfd, all_defun, macros): cmddefx.append(i[1:-1]) continue - sys.stderr.write('%s:%d: DEFPY command string not parseable (%r)\n' % (fn, entry['lineno'], cmddef)) + sys.stderr.write( + "%s:%d: DEFPY command string not parseable (%r)\n" + % (fn, entry["lineno"], cmddef) + ) errors += 1 cmddefx = None break if cmddefx is None: continue - cmddef = ''.join([i for i in cmddefx]) + cmddef = "".join([i for i in cmddefx]) graph = clippy.Graph(cmddef) args = OrderedDict() @@ -263,12 +311,12 @@ def process_file(fn, ofd, dumpfd, all_defun, macros): get_always_args(graph.first(), always_args) - #print('-' * 76) - #pprint(entry) - #clippy.dump(graph) - #pprint(args) + # print('-' * 76) + # pprint(entry) + # clippy.dump(graph) + # pprint(args) - params = { 'cmddef': cmddef, 'fnname': entry['args'][0][0] } + params = {"cmddef": cmddef, "fnname": entry["args"][0][0]} argdefs = [] argdecls = [] arglist = [] @@ -277,63 +325,96 @@ def 
process_file(fn, ofd, dumpfd, all_defun, macros): doc = [] canfail = 0 - def do_add(handler, basename, varname, attr = ''): - argdefs.append(',\\\n\t%s %s%s' % (handler.argtype, varname, attr)) - argdecls.append('\t%s\n' % (handler.decl.substitute({'varname': varname}).replace('\n', '\n\t'))) - arglist.append(', %s%s' % (handler.deref, varname)) + def do_add(handler, basename, varname, attr=""): + argdefs.append(",\\\n\t%s %s%s" % (handler.argtype, varname, attr)) + argdecls.append( + "\t%s\n" + % ( + handler.decl.substitute({"varname": varname}).replace( + "\n", "\n\t" + ) + ) + ) + arglist.append(", %s%s" % (handler.deref, varname)) if basename in always_args and handler.canassert: - argassert.append('''\tif (!%s) { + argassert.append( + """\tif (!%s) { \t\tvty_out(vty, "Internal CLI error [%%s]\\n", "%s"); \t\treturn CMD_WARNING; -\t}\n''' % (varname, varname)) - if attr == '': +\t}\n""" + % (varname, varname) + ) + if attr == "": at = handler.argtype - if not at.startswith('const '): - at = '. . . ' + at - doc.append('\t%-26s %s %s' % (at, 'alw' if basename in always_args else 'opt', varname)) + if not at.startswith("const "): + at = ". . . " + at + doc.append( + "\t%-26s %s %s" + % (at, "alw" if basename in always_args else "opt", varname) + ) for varname in args.keys(): handler = mix_handlers(args[varname]) - #print(varname, handler) - if handler is None: continue + # print(varname, handler) + if handler is None: + continue do_add(handler, varname, varname) - code = handler.code.substitute({'varname': varname}).replace('\n', '\n\t\t\t') + code = handler.code.substitute({"varname": varname}).replace( + "\n", "\n\t\t\t" + ) if handler.canfail: canfail = 1 - strblock = '' + strblock = "" if not handler.drop_str: - do_add(StringHandler(None), varname, '%s_str' % (varname), ' __attribute__ ((unused))') - strblock = '\n\t\t\t%s_str = argv[_i]->arg;' % (varname) - argblocks.append(argblock.substitute({'varname': varname, 'strblock': strblock, 'code': code})) + do_add( + StringHandler(None), + varname, + "%s_str" % (varname), + " __attribute__ ((unused))", + ) + strblock = "\n\t\t\t%s_str = argv[_i]->arg;" % (varname) + argblocks.append( + argblock.substitute( + {"varname": varname, "strblock": strblock, "code": code} + ) + ) if dumpfd is not None: if len(arglist) > 0: - dumpfd.write('"%s":\n%s\n\n' % (cmddef, '\n'.join(doc))) + dumpfd.write('"%s":\n%s\n\n' % (cmddef, "\n".join(doc))) else: dumpfd.write('"%s":\n\t---- no magic arguments ----\n\n' % (cmddef)) - params['argdefs'] = ''.join(argdefs) - params['argdecls'] = ''.join(argdecls) - params['arglist'] = ''.join(arglist) - params['argblocks'] = ''.join(argblocks) - params['canfail'] = canfail - params['nonempty'] = len(argblocks) - params['argassert'] = ''.join(argassert) + params["argdefs"] = "".join(argdefs) + params["argdecls"] = "".join(argdecls) + params["arglist"] = "".join(arglist) + params["argblocks"] = "".join(argblocks) + params["canfail"] = canfail + params["nonempty"] = len(argblocks) + params["argassert"] = "".join(argassert) ofd.write(templ.substitute(params)) return errors -if __name__ == '__main__': + +if __name__ == "__main__": import argparse - argp = argparse.ArgumentParser(description = 'FRR CLI preprocessor in Python') - argp.add_argument('--all-defun', action = 'store_const', const = True, - help = 'process DEFUN() statements in addition to DEFPY()') - argp.add_argument('--show', action = 'store_const', const = True, - help = 'print out list of arguments and types for each definition') - argp.add_argument('-o', 
type = str, metavar = 'OUTFILE', - help = 'output C file name') - argp.add_argument('cfile', type = str) + argp = argparse.ArgumentParser(description="FRR CLI preprocessor in Python") + argp.add_argument( + "--all-defun", + action="store_const", + const=True, + help="process DEFUN() statements in addition to DEFPY()", + ) + argp.add_argument( + "--show", + action="store_const", + const=True, + help="print out list of arguments and types for each definition", + ) + argp.add_argument("-o", type=str, metavar="OUTFILE", help="output C file name") + argp.add_argument("cfile", type=str) args = argp.parse_args() dumpfd = None @@ -349,15 +430,17 @@ if __name__ == '__main__': basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) macros = Macros() - macros.load('lib/route_types.h') - macros.load(os.path.join(basepath, 'lib/command.h')) - macros.load(os.path.join(basepath, 'bgpd/bgp_vty.h')) + macros.load("lib/route_types.h") + macros.load(os.path.join(basepath, "lib/command.h")) + macros.load(os.path.join(basepath, "bgpd/bgp_vty.h")) # sigh :( - macros['PROTO_REDIST_STR'] = 'FRR_REDIST_STR_ISISD' + macros["PROTO_REDIST_STR"] = "FRR_REDIST_STR_ISISD" errors = process_file(args.cfile, ofd, dumpfd, args.all_defun, macros) if errors != 0: sys.exit(1) if args.o is not None: - clippy.wrdiff(args.o, ofd, [args.cfile, os.path.realpath(__file__), sys.executable]) + clippy.wrdiff( + args.o, ofd, [args.cfile, os.path.realpath(__file__), sys.executable] + ) diff --git a/python/clippy/__init__.py b/python/clippy/__init__.py index 41aeae6b4d..d6865ff484 100644 --- a/python/clippy/__init__.py +++ b/python/clippy/__init__.py @@ -20,11 +20,12 @@ import os, stat import _clippy from _clippy import parse, Graph, GraphNode + def graph_iterate(graph): - '''iterator yielding all nodes of a graph + """iterator yielding all nodes of a graph nodes arrive in input/definition order, graph circles are avoided. 
- ''' + """ queue = [(graph.first(), frozenset(), 0)] while len(queue) > 0: @@ -42,21 +43,25 @@ def graph_iterate(graph): if n not in stop and n is not node: queue.insert(0, (n, stop, depth + 1)) + def dump(graph): - '''print out clippy.Graph''' + """print out clippy.Graph""" for i, depth in graph_iterate(graph): - print('\t%s%s %r' % (' ' * (depth * 2), i.type, i.text)) + print("\t%s%s %r" % (" " * (depth * 2), i.type, i.text)) -def wrdiff(filename, buf, reffiles = []): - '''write buffer to file if contents changed''' - expl = '' - if hasattr(buf, 'getvalue'): +def wrdiff(filename, buf, reffiles=[]): + """write buffer to file if contents changed""" + + expl = "" + if hasattr(buf, "getvalue"): buf = buf.getvalue() old = None - try: old = open(filename, 'r').read() - except: pass + try: + old = open(filename, "r").read() + except: + pass if old == buf: for reffile in reffiles: # ensure output timestamp is newer than inputs, for make @@ -67,7 +72,7 @@ def wrdiff(filename, buf, reffiles = []): # sys.stderr.write('%s unchanged, not written\n' % (filename)) return - newname = '%s.new-%d' % (filename, os.getpid()) - with open(newname, 'w') as out: + newname = "%s.new-%d" % (filename, os.getpid()) + with open(newname, "w") as out: out.write(buf) os.rename(newname, filename) diff --git a/python/firstheader.py b/python/firstheader.py index 19a85b63e5..bf50f33a33 100644 --- a/python/firstheader.py +++ b/python/firstheader.py @@ -9,21 +9,21 @@ include_re = re.compile('^#\s*include\s+["<]([^ ">]+)[">]', re.M) errors = 0 -files = subprocess.check_output(['git', 'ls-files']).decode('ASCII') +files = subprocess.check_output(["git", "ls-files"]).decode("ASCII") for fn in files.splitlines(): - if not fn.endswith('.c'): + if not fn.endswith(".c"): continue - if fn.startswith('tools/'): + if fn.startswith("tools/"): continue - with open(fn, 'r') as fd: + with open(fn, "r") as fd: data = fd.read() m = include_re.search(data) if m is None: - #sys.stderr.write('no #include in %s?\n' % (fn)) + # sys.stderr.write('no #include in %s?\n' % (fn)) continue - if m.group(1) in ['config.h', 'zebra.h', 'lib/zebra.h']: + if m.group(1) in ["config.h", "zebra.h", "lib/zebra.h"]: continue - sys.stderr.write('%s: %s\n' % (fn, m.group(0))) + sys.stderr.write("%s: %s\n" % (fn, m.group(0))) errors += 1 if errors: diff --git a/python/makefile.py b/python/makefile.py index fe20945ccc..10c73df72d 100644 --- a/python/makefile.py +++ b/python/makefile.py @@ -13,69 +13,91 @@ import argparse from string import Template from makevars import MakeReVars -argp = argparse.ArgumentParser(description = 'FRR Makefile extensions') -argp.add_argument('--dev-build', action = 'store_const', const = True, - help = 'run additional developer checks') +argp = argparse.ArgumentParser(description="FRR Makefile extensions") +argp.add_argument( + "--dev-build", + action="store_const", + const=True, + help="run additional developer checks", +) args = argp.parse_args() -with open('Makefile', 'r') as fd: +with open("Makefile", "r") as fd: before = fd.read() mv = MakeReVars(before) -clippy_scan = mv['clippy_scan'].strip().split() +clippy_scan = mv["clippy_scan"].strip().split() for clippy_file in clippy_scan: - assert clippy_file.endswith('.c') + assert clippy_file.endswith(".c") # check for files using clippy but not listed in clippy_scan if args.dev_build: basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - if os.path.exists(os.path.join(basepath, '.git')): - clippy_ref = subprocess.check_output([ - 'git', '-C', basepath, 'grep', '-l', 
'-P', '^#\s*include.*_clippy.c', '--', '**.c']).decode('US-ASCII') + if os.path.exists(os.path.join(basepath, ".git")): + clippy_ref = subprocess.check_output( + [ + "git", + "-C", + basepath, + "grep", + "-l", + "-P", + "^#\s*include.*_clippy.c", + "--", + "**.c", + ] + ).decode("US-ASCII") clippy_ref = set(clippy_ref.splitlines()) missing = clippy_ref - set(clippy_scan) if len(missing) > 0: - sys.stderr.write('error: files seem to be using clippy, but not listed in "clippy_scan" in subdir.am:\n\t%s\n' % ('\n\t'.join(sorted(missing)))) + sys.stderr.write( + 'error: files seem to be using clippy, but not listed in "clippy_scan" in subdir.am:\n\t%s\n' + % ("\n\t".join(sorted(missing))) + ) sys.exit(1) -clippydep = Template(''' +clippydep = Template( + """ ${clippybase}.$$(OBJEXT): ${clippybase}_clippy.c ${clippybase}.lo: ${clippybase}_clippy.c -${clippybase}_clippy.c: $$(CLIPPY_DEPS)''') +${clippybase}_clippy.c: $$(CLIPPY_DEPS)""" +) -clippyauxdep = Template('''# clippy{ +clippyauxdep = Template( + """# clippy{ # auxiliary clippy target ${target}: ${clippybase}_clippy.c -# }clippy''') +# }clippy""" +) lines = before.splitlines() -autoderp = '#AUTODERP# ' +autoderp = "#AUTODERP# " out_lines = [] bcdeps = [] -make_rule_re = re.compile('^([^:\s]+):\s*([^:\s]+)\s*($|\n)') +make_rule_re = re.compile("^([^:\s]+):\s*([^:\s]+)\s*($|\n)") while lines: line = lines.pop(0) if line.startswith(autoderp): - line = line[len(autoderp):] + line = line[len(autoderp) :] - if line == '# clippy{': + if line == "# clippy{": while lines: line = lines.pop(0) - if line == '# }clippy': + if line == "# }clippy": break continue - if line.startswith('#'): + if line.startswith("#"): out_lines.append(line) continue full_line = line full_lines = lines[:] - while full_line.endswith('\\'): + while full_line.endswith("\\"): full_line = full_line[:-1] + full_lines.pop(0) m = make_rule_re.match(full_line) @@ -87,43 +109,51 @@ while lines: target, dep = m.group(1), m.group(2) - if target.endswith('.lo') or target.endswith('.o'): - if not dep.endswith('.h'): - bcdeps.append('%s.bc: %s' % (target, target)) - bcdeps.append('\t$(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ %s' % (dep)) + if target.endswith(".lo") or target.endswith(".o"): + if not dep.endswith(".h"): + bcdeps.append("%s.bc: %s" % (target, target)) + bcdeps.append("\t$(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ %s" % (dep)) if m.group(2) in clippy_scan: - out_lines.append(clippyauxdep.substitute(target=m.group(1), clippybase=m.group(2)[:-2])) + out_lines.append( + clippyauxdep.substitute(target=m.group(1), clippybase=m.group(2)[:-2]) + ) out_lines.append(line) -out_lines.append('# clippy{\n# main clippy targets') +out_lines.append("# clippy{\n# main clippy targets") for clippy_file in clippy_scan: - out_lines.append(clippydep.substitute(clippybase = clippy_file[:-2])) + out_lines.append(clippydep.substitute(clippybase=clippy_file[:-2])) -out_lines.append('') +out_lines.append("") out_lines.extend(bcdeps) -out_lines.append('') +out_lines.append("") bc_targets = [] -for varname in ['bin_PROGRAMS', 'sbin_PROGRAMS', 'lib_LTLIBRARIES', 'module_LTLIBRARIES', 'noinst_LIBRARIES']: +for varname in [ + "bin_PROGRAMS", + "sbin_PROGRAMS", + "lib_LTLIBRARIES", + "module_LTLIBRARIES", + "noinst_LIBRARIES", +]: bc_targets.extend(mv[varname].strip().split()) for target in bc_targets: - amtgt = target.replace('/', '_').replace('.', '_').replace('-', '_') - objs = mv[amtgt + '_OBJECTS'].strip().split() - objs = [obj + '.bc' for obj in objs] - deps = mv.get(amtgt + '_DEPENDENCIES', 
'').strip().split() - deps = [d + '.bc' for d in deps if d.endswith('.a')] + amtgt = target.replace("/", "_").replace(".", "_").replace("-", "_") + objs = mv[amtgt + "_OBJECTS"].strip().split() + objs = [obj + ".bc" for obj in objs] + deps = mv.get(amtgt + "_DEPENDENCIES", "").strip().split() + deps = [d + ".bc" for d in deps if d.endswith(".a")] objs.extend(deps) - out_lines.append('%s.bc: %s' % (target, ' '.join(objs))) - out_lines.append('\t$(AM_V_LLVM_LD)$(LLVM_LINK) -o $@ $^') - out_lines.append('') + out_lines.append("%s.bc: %s" % (target, " ".join(objs))) + out_lines.append("\t$(AM_V_LLVM_LD)$(LLVM_LINK) -o $@ $^") + out_lines.append("") -out_lines.append('# }clippy') -out_lines.append('') +out_lines.append("# }clippy") +out_lines.append("") -after = '\n'.join(out_lines) +after = "\n".join(out_lines) if after == before: sys.exit(0) -with open('Makefile.pyout', 'w') as fd: +with open("Makefile.pyout", "w") as fd: fd.write(after) -os.rename('Makefile.pyout', 'Makefile') +os.rename("Makefile.pyout", "Makefile") diff --git a/python/makevars.py b/python/makevars.py index 63bf8c5eeb..951cd3438b 100644 --- a/python/makevars.py +++ b/python/makevars.py @@ -6,10 +6,12 @@ import os import subprocess import re + class MakeVarsBase(object): - ''' + """ common code between MakeVars and MakeReVars - ''' + """ + def __init__(self): self._data = dict() @@ -18,31 +20,35 @@ class MakeVarsBase(object): self.getvars([k]) return self._data[k] - def get(self, k, defval = None): + def get(self, k, defval=None): if k not in self._data: self.getvars([k]) return self._data.get(k) or defval + class MakeVars(MakeVarsBase): - ''' + """ makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile This variant works by invoking make as a subprocess, i.e. Makefile must be valid and working. (This is sometimes a problem if depfiles have not been generated.) - ''' + """ + def getvars(self, varlist): - ''' + """ get a batch list of variables from make. faster than individual calls. - ''' + """ rdfd, wrfd = os.pipe() - shvars = ['shvar-%s' % s for s in varlist] - make = subprocess.Popen(['make', '-s', 'VARFD=%d' % wrfd] + shvars, pass_fds = [wrfd]) + shvars = ["shvar-%s" % s for s in varlist] + make = subprocess.Popen( + ["make", "-s", "VARFD=%d" % wrfd] + shvars, pass_fds=[wrfd] + ) os.close(wrfd) - data = b'' + data = b"" - rdf = os.fdopen(rdfd, 'rb') + rdf = os.fdopen(rdfd, "rb") while True: rdata = rdf.read() if len(rdata) == 0: @@ -52,30 +58,34 @@ class MakeVars(MakeVarsBase): del rdf make.wait() - data = data.decode('US-ASCII').strip().split('\n') + data = data.decode("US-ASCII").strip().split("\n") for row in data: - k, v = row.split('=', 1) + k, v = row.split("=", 1) v = v[1:-1] self._data[k] = v + class MakeReVars(MakeVarsBase): - ''' + """ makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile This variant works by regexing through Makefile. This means the Makefile does not need to be fully working, but on the other hand it doesn't support fancy complicated make expressions. 
-    '''
-    var_re = re.compile(r'^([^=#\n\s]+)[ \t]*=[ \t]*([^#\n]*)(?:#.*)?$', flags=re.MULTILINE)
-    repl_re = re.compile(r'\$(?:([A-Za-z])|\(([^\)]+)\))')
+    """
+
+    var_re = re.compile(
+        r"^([^=#\n\s]+)[ \t]*=[ \t]*([^#\n]*)(?:#.*)?$", flags=re.MULTILINE
+    )
+    repl_re = re.compile(r"\$(?:([A-Za-z])|\(([^\)]+)\))")
 
     def __init__(self, maketext):
         super(MakeReVars, self).__init__()
-        self._vars = dict(self.var_re.findall(maketext.replace('\\\n', '')))
+        self._vars = dict(self.var_re.findall(maketext.replace("\\\n", "")))
 
     def replacevar(self, match):
         varname = match.group(1) or match.group(2)
-        return self._vars.get(varname, '')
+        return self._vars.get(varname, "")
 
     def getvars(self, varlist):
         for varname in varlist:
diff --git a/tests/bgpd/test_aspath.py b/tests/bgpd/test_aspath.py
index 5fa1f11629..88579ad3e4 100644
--- a/tests/bgpd/test_aspath.py
+++ b/tests/bgpd/test_aspath.py
@@ -1,14 +1,16 @@
 import frrtest
 import re
 
-re_okfail = re.compile(r'^(?:\x1b\[3[12]m)?(?P<ok>OK|failed)'.encode('utf8'),
-                       re.MULTILINE)
+re_okfail = re.compile(
+    r"^(?:\x1b\[3[12]m)?(?P<ok>OK|failed)".encode("utf8"), re.MULTILINE
+)
+
 
 class TestAspath(frrtest.TestMultiOut):
-    program = './test_aspath'
+    program = "./test_aspath"
 
     def _parsertest(self, line):
-        if not hasattr(self, 'parserno'):
+        if not hasattr(self, "parserno"):
             self.parserno = -1
         self.parserno += 1
 
@@ -17,13 +19,14 @@ class TestAspath(frrtest.TestMultiOut):
         self._okfail("empty prepend %s:" % line, okfail=re_okfail)
 
     def _attrtest(self, line):
-        if not hasattr(self, 'attrno'):
+        if not hasattr(self, "attrno"):
             self.attrno = -1
         self.attrno += 1
 
         self._onesimple("aspath_attr test %d" % self.attrno)
         self._okfail(line, okfail=re_okfail)
 
+
 TestAspath.parsertest("seq1")
 TestAspath.parsertest("seq2")
 TestAspath.parsertest("seq3")
diff --git a/tests/bgpd/test_bgp_table.py b/tests/bgpd/test_bgp_table.py
index 53bd37233a..8f0544249c 100644
--- a/tests/bgpd/test_bgp_table.py
+++ b/tests/bgpd/test_bgp_table.py
@@ -1,7 +1,9 @@
 import frrtest
 
+
 class TestTable(frrtest.TestMultiOut):
-    program = './test_bgp_table'
+    program = "./test_bgp_table"
+
 
 for i in range(7):
-    TestTable.onesimple('Checks successfull')
+    TestTable.onesimple("Checks successfull")
diff --git a/tests/bgpd/test_capability.py b/tests/bgpd/test_capability.py
index 872fcb6d12..e275195537 100644
--- a/tests/bgpd/test_capability.py
+++ b/tests/bgpd/test_capability.py
@@ -1,7 +1,9 @@
 import frrtest
 
+
 class TestCapability(frrtest.TestMultiOut):
-    program = './test_capability'
+    program = "./test_capability"
+
 
 TestCapability.okfail("MP4: MP IP/Uni")
 TestCapability.okfail("MPv6: MP IPv6/Uni")
@@ -43,5 +45,9 @@ TestCapability.okfail("AS4real2: AS4 capability, in series of capabilities")
 TestCapability.okfail("DynCap: Dynamic Capability Message, IP/Multicast")
 TestCapability.okfail("DynCapLong: Dynamic Capability Message, IP/Multicast, truncated")
 TestCapability.okfail("DynCapPadded: Dynamic Capability Message, IP/Multicast, padded")
-TestCapability.okfail("DynCapMPCpadded: Dynamic Capability Message, IP/Multicast, cap data padded")
-TestCapability.okfail("DynCapMPCoverflow: Dynamic Capability Message, IP/Multicast, cap data != length")
+TestCapability.okfail(
+    "DynCapMPCpadded: Dynamic Capability Message, IP/Multicast, cap data padded"
+)
+TestCapability.okfail(
+    "DynCapMPCoverflow: Dynamic Capability Message, IP/Multicast, cap data != length"
+)
diff --git a/tests/bgpd/test_ecommunity.py b/tests/bgpd/test_ecommunity.py
index 3a17ec9e31..1499294f7b 100644
--- a/tests/bgpd/test_ecommunity.py
+++ 
b/tests/bgpd/test_ecommunity.py @@ -1,9 +1,11 @@ import frrtest -class TestEcommunity(frrtest.TestMultiOut): - program = './test_ecommunity' -TestEcommunity.okfail('ipaddr') -TestEcommunity.okfail('ipaddr-so') -TestEcommunity.okfail('asn') -TestEcommunity.okfail('asn4') +class TestEcommunity(frrtest.TestMultiOut): + program = "./test_ecommunity" + + +TestEcommunity.okfail("ipaddr") +TestEcommunity.okfail("ipaddr-so") +TestEcommunity.okfail("asn") +TestEcommunity.okfail("asn4") diff --git a/tests/bgpd/test_mp_attr.py b/tests/bgpd/test_mp_attr.py index 46d0c42402..d9612bb8d3 100644 --- a/tests/bgpd/test_mp_attr.py +++ b/tests/bgpd/test_mp_attr.py @@ -1,7 +1,9 @@ import frrtest + class TestMpAttr(frrtest.TestMultiOut): - program = './test_mp_attr' + program = "./test_mp_attr" + TestMpAttr.okfail("IPv6: IPV6 MP Reach, global nexthop, 1 NLRI") TestMpAttr.okfail("IPv6-2: IPV6 MP Reach, global nexthop, 2 NLRIs") @@ -16,13 +18,27 @@ TestMpAttr.okfail("IPv4: IPv4 MP Reach, 2 NLRIs + default") TestMpAttr.okfail("IPv4-nhlen: IPv4 MP Reach, nexthop lenth overflow") TestMpAttr.okfail("IPv4-nlrilen: IPv4 MP Reach, nlri lenth overflow") TestMpAttr.okfail("IPv4-VPNv4: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs") -TestMpAttr.okfail("IPv4-VPNv4-bogus-plen: IPv4/MPLS-labeled VPN MP Reach, RD, Nexthop, NLRI / bogus p'len") -TestMpAttr.okfail("IPv4-VPNv4-plen1-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen short") -TestMpAttr.okfail("IPv4-VPNv4-plen1-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen long") -TestMpAttr.okfail("IPv4-VPNv4-plenn-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRIs, last plen long") -TestMpAttr.okfail("IPv4-VPNv4-plenn-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, last plen short") -TestMpAttr.okfail("IPv4-VPNv4-bogus-rd-type: IPv4/VPNv4 MP Reach, RD, NH, 2 NLRI, unknown RD in 1st (log, but parse)") -TestMpAttr.okfail("IPv4-VPNv4-0-nlri: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRI, 3rd 0 bogus") +TestMpAttr.okfail( + "IPv4-VPNv4-bogus-plen: IPv4/MPLS-labeled VPN MP Reach, RD, Nexthop, NLRI / bogus p'len" +) +TestMpAttr.okfail( + "IPv4-VPNv4-plen1-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen short" +) +TestMpAttr.okfail( + "IPv4-VPNv4-plen1-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen long" +) +TestMpAttr.okfail( + "IPv4-VPNv4-plenn-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRIs, last plen long" +) +TestMpAttr.okfail( + "IPv4-VPNv4-plenn-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, last plen short" +) +TestMpAttr.okfail( + "IPv4-VPNv4-bogus-rd-type: IPv4/VPNv4 MP Reach, RD, NH, 2 NLRI, unknown RD in 1st (log, but parse)" +) +TestMpAttr.okfail( + "IPv4-VPNv4-0-nlri: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRI, 3rd 0 bogus" +) TestMpAttr.okfail("IPv6-bug: IPv6, global nexthop, 1 default NLRI") TestMpAttr.okfail("IPv6-unreach: IPV6 MP Unreach, 1 NLRI") TestMpAttr.okfail("IPv6-unreach2: IPV6 MP Unreach, 2 NLRIs") diff --git a/tests/bgpd/test_mpath.py b/tests/bgpd/test_mpath.py index ce34ff8436..582fd25c20 100644 --- a/tests/bgpd/test_mpath.py +++ b/tests/bgpd/test_mpath.py @@ -1,9 +1,10 @@ import frrtest + class TestMpath(frrtest.TestMultiOut): - program = './test_mpath' + program = "./test_mpath" + TestMpath.okfail("bgp maximum-paths config") TestMpath.okfail("bgp_mp_list") TestMpath.okfail("bgp_path_info_mpath_update") - diff --git a/tests/bgpd/test_peer_attr.py b/tests/bgpd/test_peer_attr.py index 44068605ee..16b441b25d 100644 --- a/tests/bgpd/test_peer_attr.py +++ b/tests/bgpd/test_peer_attr.py @@ -1,196 +1,198 @@ import frrtest + class 
TestFlag(frrtest.TestMultiOut): - program = './test_peer_attr' + program = "./test_peer_attr" + # List of tests can be generated by executing: # $> ./test_peer_attr 2>&1 | sed -n 's/\\/\\\\/g; s/\S\+ \[test\] \(.\+\)/TestFlag.okfail(\x27\1\x27)/pg' # -TestFlag.okfail('peer\\advertisement-interval') -TestFlag.okfail('peer\\capability dynamic') -TestFlag.okfail('peer\\capability extended-nexthop') -#TestFlag.okfail('peer\\capability extended-nexthop') -TestFlag.okfail('peer\\description') -TestFlag.okfail('peer\\disable-connected-check') -TestFlag.okfail('peer\\dont-capability-negotiate') -TestFlag.okfail('peer\\enforce-first-as') -TestFlag.okfail('peer\\local-as') -TestFlag.okfail('peer\\local-as 1 no-prepend') -TestFlag.okfail('peer\\local-as 1 no-prepend replace-as') -TestFlag.okfail('peer\\override-capability') -TestFlag.okfail('peer\\passive') -TestFlag.okfail('peer\\password') -TestFlag.okfail('peer\\shutdown') -TestFlag.okfail('peer\\strict-capability-match') -TestFlag.okfail('peer\\timers') -TestFlag.okfail('peer\\timers connect') -TestFlag.okfail('peer\\update-source') -TestFlag.okfail('peer\\update-source') -TestFlag.okfail('peer\\ipv4-unicast\\addpath') -TestFlag.okfail('peer\\ipv4-multicast\\addpath') -TestFlag.okfail('peer\\ipv6-unicast\\addpath') -TestFlag.okfail('peer\\ipv6-multicast\\addpath') -TestFlag.okfail('peer\\ipv4-unicast\\allowas-in') -TestFlag.okfail('peer\\ipv4-multicast\\allowas-in') -TestFlag.okfail('peer\\ipv6-unicast\\allowas-in') -TestFlag.okfail('peer\\ipv6-multicast\\allowas-in') -TestFlag.okfail('peer\\ipv4-unicast\\allowas-in origin') -TestFlag.okfail('peer\\ipv4-multicast\\allowas-in origin') -TestFlag.okfail('peer\\ipv6-unicast\\allowas-in origin') -TestFlag.okfail('peer\\ipv6-multicast\\allowas-in origin') -TestFlag.okfail('peer\\ipv4-unicast\\as-override') -TestFlag.okfail('peer\\ipv4-multicast\\as-override') -TestFlag.okfail('peer\\ipv6-unicast\\as-override') -TestFlag.okfail('peer\\ipv6-multicast\\as-override') -TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path') -TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path') -TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path') -TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path') -TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged next-hop') -TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged next-hop') -TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged next-hop') -TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged next-hop') -TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged med') -TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged med') -TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged med') -TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged med') -TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path next-hop') -TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path next-hop') -TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path next-hop') -TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path next-hop') -TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path med') -TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path med') -TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path med') -TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path med') -TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path next-hop med') -TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path 
next-hop med') -TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path next-hop med') -TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path next-hop med') -TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list send') -TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list send') -TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list send') -TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list send') -TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list receive') -TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list receive') -TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list receive') -TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list receive') -TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list both') -TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list both') -TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list both') -TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list both') -TestFlag.okfail('peer\\ipv4-unicast\\default-originate') -TestFlag.okfail('peer\\ipv4-multicast\\default-originate') -TestFlag.okfail('peer\\ipv6-unicast\\default-originate') -TestFlag.okfail('peer\\ipv6-multicast\\default-originate') -TestFlag.okfail('peer\\ipv4-unicast\\default-originate route-map') -TestFlag.okfail('peer\\ipv4-multicast\\default-originate route-map') -TestFlag.okfail('peer\\ipv6-unicast\\default-originate route-map') -TestFlag.okfail('peer\\ipv6-multicast\\default-originate route-map') -TestFlag.okfail('peer\\ipv4-unicast\\distribute-list') -TestFlag.okfail('peer\\ipv4-multicast\\distribute-list') -TestFlag.okfail('peer\\ipv6-unicast\\distribute-list') -TestFlag.okfail('peer\\ipv6-multicast\\distribute-list') -TestFlag.okfail('peer\\ipv4-unicast\\distribute-list') -TestFlag.okfail('peer\\ipv4-multicast\\distribute-list') -TestFlag.okfail('peer\\ipv6-unicast\\distribute-list') -TestFlag.okfail('peer\\ipv6-multicast\\distribute-list') -TestFlag.okfail('peer\\ipv4-unicast\\filter-list') -TestFlag.okfail('peer\\ipv4-multicast\\filter-list') -TestFlag.okfail('peer\\ipv6-unicast\\filter-list') -TestFlag.okfail('peer\\ipv6-multicast\\filter-list') -TestFlag.okfail('peer\\ipv4-unicast\\filter-list') -TestFlag.okfail('peer\\ipv4-multicast\\filter-list') -TestFlag.okfail('peer\\ipv6-unicast\\filter-list') -TestFlag.okfail('peer\\ipv6-multicast\\filter-list') -TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix') 
-TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix') -TestFlag.okfail('peer\\ipv4-unicast\\next-hop-self') -TestFlag.okfail('peer\\ipv4-multicast\\next-hop-self') -TestFlag.okfail('peer\\ipv6-unicast\\next-hop-self') -TestFlag.okfail('peer\\ipv6-multicast\\next-hop-self') -TestFlag.okfail('peer\\ipv4-unicast\\next-hop-self force') -TestFlag.okfail('peer\\ipv4-multicast\\next-hop-self force') -TestFlag.okfail('peer\\ipv6-unicast\\next-hop-self force') -TestFlag.okfail('peer\\ipv6-multicast\\next-hop-self force') -TestFlag.okfail('peer\\ipv4-unicast\\prefix-list') -TestFlag.okfail('peer\\ipv4-multicast\\prefix-list') -TestFlag.okfail('peer\\ipv6-unicast\\prefix-list') -TestFlag.okfail('peer\\ipv6-multicast\\prefix-list') -TestFlag.okfail('peer\\ipv4-unicast\\prefix-list') -TestFlag.okfail('peer\\ipv4-multicast\\prefix-list') -TestFlag.okfail('peer\\ipv6-unicast\\prefix-list') -TestFlag.okfail('peer\\ipv6-multicast\\prefix-list') -TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS') -TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS') -TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS') -TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS') -TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS all') -TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS all') -TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS all') -TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS all') -TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS replace-AS') -TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS replace-AS') -TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS replace-AS') -TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS replace-AS') -TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS all replace-AS') -TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS all replace-AS') -TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS all replace-AS') -TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS all replace-AS') -TestFlag.okfail('peer\\ipv4-unicast\\route-map') -TestFlag.okfail('peer\\ipv4-multicast\\route-map') -TestFlag.okfail('peer\\ipv6-unicast\\route-map') -TestFlag.okfail('peer\\ipv6-multicast\\route-map') -TestFlag.okfail('peer\\ipv4-unicast\\route-map') -TestFlag.okfail('peer\\ipv4-multicast\\route-map') -TestFlag.okfail('peer\\ipv6-unicast\\route-map') -TestFlag.okfail('peer\\ipv6-multicast\\route-map') -TestFlag.okfail('peer\\ipv4-unicast\\route-reflector-client') -TestFlag.okfail('peer\\ipv4-multicast\\route-reflector-client') -TestFlag.okfail('peer\\ipv6-unicast\\route-reflector-client') -TestFlag.okfail('peer\\ipv6-multicast\\route-reflector-client') -TestFlag.okfail('peer\\ipv4-unicast\\route-server-client') -TestFlag.okfail('peer\\ipv4-multicast\\route-server-client') -TestFlag.okfail('peer\\ipv6-unicast\\route-server-client') -TestFlag.okfail('peer\\ipv6-multicast\\route-server-client') -TestFlag.okfail('peer\\ipv4-unicast\\send-community') -TestFlag.okfail('peer\\ipv4-multicast\\send-community') -TestFlag.okfail('peer\\ipv6-unicast\\send-community') -TestFlag.okfail('peer\\ipv6-multicast\\send-community') -TestFlag.okfail('peer\\ipv4-unicast\\send-community extended') -TestFlag.okfail('peer\\ipv4-multicast\\send-community extended') -TestFlag.okfail('peer\\ipv6-unicast\\send-community extended') -TestFlag.okfail('peer\\ipv6-multicast\\send-community extended') -TestFlag.okfail('peer\\ipv4-unicast\\send-community large') -TestFlag.okfail('peer\\ipv4-multicast\\send-community large') 
-TestFlag.okfail('peer\\ipv6-unicast\\send-community large') -TestFlag.okfail('peer\\ipv6-multicast\\send-community large') -TestFlag.okfail('peer\\ipv4-unicast\\soft-reconfiguration inbound') -TestFlag.okfail('peer\\ipv4-multicast\\soft-reconfiguration inbound') -TestFlag.okfail('peer\\ipv6-unicast\\soft-reconfiguration inbound') -TestFlag.okfail('peer\\ipv6-multicast\\soft-reconfiguration inbound') -TestFlag.okfail('peer\\ipv4-unicast\\unsuppress-map') -TestFlag.okfail('peer\\ipv4-multicast\\unsuppress-map') -TestFlag.okfail('peer\\ipv6-unicast\\unsuppress-map') -TestFlag.okfail('peer\\ipv6-multicast\\unsuppress-map') -TestFlag.okfail('peer\\ipv4-unicast\\weight') -TestFlag.okfail('peer\\ipv4-multicast\\weight') -TestFlag.okfail('peer\\ipv6-unicast\\weight') -TestFlag.okfail('peer\\ipv6-multicast\\weight') +TestFlag.okfail("peer\\advertisement-interval") +TestFlag.okfail("peer\\capability dynamic") +TestFlag.okfail("peer\\capability extended-nexthop") +# TestFlag.okfail('peer\\capability extended-nexthop') +TestFlag.okfail("peer\\description") +TestFlag.okfail("peer\\disable-connected-check") +TestFlag.okfail("peer\\dont-capability-negotiate") +TestFlag.okfail("peer\\enforce-first-as") +TestFlag.okfail("peer\\local-as") +TestFlag.okfail("peer\\local-as 1 no-prepend") +TestFlag.okfail("peer\\local-as 1 no-prepend replace-as") +TestFlag.okfail("peer\\override-capability") +TestFlag.okfail("peer\\passive") +TestFlag.okfail("peer\\password") +TestFlag.okfail("peer\\shutdown") +TestFlag.okfail("peer\\strict-capability-match") +TestFlag.okfail("peer\\timers") +TestFlag.okfail("peer\\timers connect") +TestFlag.okfail("peer\\update-source") +TestFlag.okfail("peer\\update-source") +TestFlag.okfail("peer\\ipv4-unicast\\addpath") +TestFlag.okfail("peer\\ipv4-multicast\\addpath") +TestFlag.okfail("peer\\ipv6-unicast\\addpath") +TestFlag.okfail("peer\\ipv6-multicast\\addpath") +TestFlag.okfail("peer\\ipv4-unicast\\allowas-in") +TestFlag.okfail("peer\\ipv4-multicast\\allowas-in") +TestFlag.okfail("peer\\ipv6-unicast\\allowas-in") +TestFlag.okfail("peer\\ipv6-multicast\\allowas-in") +TestFlag.okfail("peer\\ipv4-unicast\\allowas-in origin") +TestFlag.okfail("peer\\ipv4-multicast\\allowas-in origin") +TestFlag.okfail("peer\\ipv6-unicast\\allowas-in origin") +TestFlag.okfail("peer\\ipv6-multicast\\allowas-in origin") +TestFlag.okfail("peer\\ipv4-unicast\\as-override") +TestFlag.okfail("peer\\ipv4-multicast\\as-override") +TestFlag.okfail("peer\\ipv6-unicast\\as-override") +TestFlag.okfail("peer\\ipv6-multicast\\as-override") +TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path") +TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path") +TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path") +TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path") +TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged next-hop") +TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged next-hop") +TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged next-hop") +TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged next-hop") +TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged med") +TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged med") +TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged med") +TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged med") +TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path next-hop") +TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path next-hop") 
+TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path next-hop") +TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path next-hop") +TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path med") +TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path med") +TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path med") +TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path med") +TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path next-hop med") +TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path next-hop med") +TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path next-hop med") +TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path next-hop med") +TestFlag.okfail("peer\\ipv4-unicast\\capability orf prefix-list send") +TestFlag.okfail("peer\\ipv4-multicast\\capability orf prefix-list send") +TestFlag.okfail("peer\\ipv6-unicast\\capability orf prefix-list send") +TestFlag.okfail("peer\\ipv6-multicast\\capability orf prefix-list send") +TestFlag.okfail("peer\\ipv4-unicast\\capability orf prefix-list receive") +TestFlag.okfail("peer\\ipv4-multicast\\capability orf prefix-list receive") +TestFlag.okfail("peer\\ipv6-unicast\\capability orf prefix-list receive") +TestFlag.okfail("peer\\ipv6-multicast\\capability orf prefix-list receive") +TestFlag.okfail("peer\\ipv4-unicast\\capability orf prefix-list both") +TestFlag.okfail("peer\\ipv4-multicast\\capability orf prefix-list both") +TestFlag.okfail("peer\\ipv6-unicast\\capability orf prefix-list both") +TestFlag.okfail("peer\\ipv6-multicast\\capability orf prefix-list both") +TestFlag.okfail("peer\\ipv4-unicast\\default-originate") +TestFlag.okfail("peer\\ipv4-multicast\\default-originate") +TestFlag.okfail("peer\\ipv6-unicast\\default-originate") +TestFlag.okfail("peer\\ipv6-multicast\\default-originate") +TestFlag.okfail("peer\\ipv4-unicast\\default-originate route-map") +TestFlag.okfail("peer\\ipv4-multicast\\default-originate route-map") +TestFlag.okfail("peer\\ipv6-unicast\\default-originate route-map") +TestFlag.okfail("peer\\ipv6-multicast\\default-originate route-map") +TestFlag.okfail("peer\\ipv4-unicast\\distribute-list") +TestFlag.okfail("peer\\ipv4-multicast\\distribute-list") +TestFlag.okfail("peer\\ipv6-unicast\\distribute-list") +TestFlag.okfail("peer\\ipv6-multicast\\distribute-list") +TestFlag.okfail("peer\\ipv4-unicast\\distribute-list") +TestFlag.okfail("peer\\ipv4-multicast\\distribute-list") +TestFlag.okfail("peer\\ipv6-unicast\\distribute-list") +TestFlag.okfail("peer\\ipv6-multicast\\distribute-list") +TestFlag.okfail("peer\\ipv4-unicast\\filter-list") +TestFlag.okfail("peer\\ipv4-multicast\\filter-list") +TestFlag.okfail("peer\\ipv6-unicast\\filter-list") +TestFlag.okfail("peer\\ipv6-multicast\\filter-list") +TestFlag.okfail("peer\\ipv4-unicast\\filter-list") +TestFlag.okfail("peer\\ipv4-multicast\\filter-list") +TestFlag.okfail("peer\\ipv6-unicast\\filter-list") +TestFlag.okfail("peer\\ipv6-multicast\\filter-list") +TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix") 
+TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix") +TestFlag.okfail("peer\\ipv4-unicast\\next-hop-self") +TestFlag.okfail("peer\\ipv4-multicast\\next-hop-self") +TestFlag.okfail("peer\\ipv6-unicast\\next-hop-self") +TestFlag.okfail("peer\\ipv6-multicast\\next-hop-self") +TestFlag.okfail("peer\\ipv4-unicast\\next-hop-self force") +TestFlag.okfail("peer\\ipv4-multicast\\next-hop-self force") +TestFlag.okfail("peer\\ipv6-unicast\\next-hop-self force") +TestFlag.okfail("peer\\ipv6-multicast\\next-hop-self force") +TestFlag.okfail("peer\\ipv4-unicast\\prefix-list") +TestFlag.okfail("peer\\ipv4-multicast\\prefix-list") +TestFlag.okfail("peer\\ipv6-unicast\\prefix-list") +TestFlag.okfail("peer\\ipv6-multicast\\prefix-list") +TestFlag.okfail("peer\\ipv4-unicast\\prefix-list") +TestFlag.okfail("peer\\ipv4-multicast\\prefix-list") +TestFlag.okfail("peer\\ipv6-unicast\\prefix-list") +TestFlag.okfail("peer\\ipv6-multicast\\prefix-list") +TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS") +TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS") +TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS") +TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS") +TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS all") +TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS all") +TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS all") +TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS all") +TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS replace-AS") +TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS replace-AS") +TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS replace-AS") +TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS replace-AS") +TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS all replace-AS") +TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS all replace-AS") +TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS all replace-AS") +TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS all replace-AS") +TestFlag.okfail("peer\\ipv4-unicast\\route-map") +TestFlag.okfail("peer\\ipv4-multicast\\route-map") +TestFlag.okfail("peer\\ipv6-unicast\\route-map") +TestFlag.okfail("peer\\ipv6-multicast\\route-map") +TestFlag.okfail("peer\\ipv4-unicast\\route-map") +TestFlag.okfail("peer\\ipv4-multicast\\route-map") +TestFlag.okfail("peer\\ipv6-unicast\\route-map") +TestFlag.okfail("peer\\ipv6-multicast\\route-map") +TestFlag.okfail("peer\\ipv4-unicast\\route-reflector-client") +TestFlag.okfail("peer\\ipv4-multicast\\route-reflector-client") +TestFlag.okfail("peer\\ipv6-unicast\\route-reflector-client") +TestFlag.okfail("peer\\ipv6-multicast\\route-reflector-client") +TestFlag.okfail("peer\\ipv4-unicast\\route-server-client") +TestFlag.okfail("peer\\ipv4-multicast\\route-server-client") +TestFlag.okfail("peer\\ipv6-unicast\\route-server-client") +TestFlag.okfail("peer\\ipv6-multicast\\route-server-client") 
+TestFlag.okfail("peer\\ipv4-unicast\\send-community") +TestFlag.okfail("peer\\ipv4-multicast\\send-community") +TestFlag.okfail("peer\\ipv6-unicast\\send-community") +TestFlag.okfail("peer\\ipv6-multicast\\send-community") +TestFlag.okfail("peer\\ipv4-unicast\\send-community extended") +TestFlag.okfail("peer\\ipv4-multicast\\send-community extended") +TestFlag.okfail("peer\\ipv6-unicast\\send-community extended") +TestFlag.okfail("peer\\ipv6-multicast\\send-community extended") +TestFlag.okfail("peer\\ipv4-unicast\\send-community large") +TestFlag.okfail("peer\\ipv4-multicast\\send-community large") +TestFlag.okfail("peer\\ipv6-unicast\\send-community large") +TestFlag.okfail("peer\\ipv6-multicast\\send-community large") +TestFlag.okfail("peer\\ipv4-unicast\\soft-reconfiguration inbound") +TestFlag.okfail("peer\\ipv4-multicast\\soft-reconfiguration inbound") +TestFlag.okfail("peer\\ipv6-unicast\\soft-reconfiguration inbound") +TestFlag.okfail("peer\\ipv6-multicast\\soft-reconfiguration inbound") +TestFlag.okfail("peer\\ipv4-unicast\\unsuppress-map") +TestFlag.okfail("peer\\ipv4-multicast\\unsuppress-map") +TestFlag.okfail("peer\\ipv6-unicast\\unsuppress-map") +TestFlag.okfail("peer\\ipv6-multicast\\unsuppress-map") +TestFlag.okfail("peer\\ipv4-unicast\\weight") +TestFlag.okfail("peer\\ipv4-multicast\\weight") +TestFlag.okfail("peer\\ipv6-unicast\\weight") +TestFlag.okfail("peer\\ipv6-multicast\\weight") diff --git a/tests/helpers/python/frrsix.py b/tests/helpers/python/frrsix.py index 91714f0c67..df737d92ef 100644 --- a/tests/helpers/python/frrsix.py +++ b/tests/helpers/python/frrsix.py @@ -29,24 +29,29 @@ import sys PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 + def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') + slots = orig_vars.get("__slots__") if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) + orig_vars.pop("__dict__", None) + orig_vars.pop("__weakref__", None) return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + if PY3: import builtins - exec_ = getattr(builtins,'exec') + + exec_ = getattr(builtins, "exec") def reraise(tp, value, tb=None): try: @@ -59,7 +64,9 @@ if PY3: value = None tb = None + else: + def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: @@ -72,9 +79,11 @@ else: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") - exec_("""def reraise(tp, value, tb=None): + exec_( + """def reraise(tp, value, tb=None): try: raise tp, value, tb finally: tb = None -""") +""" + ) diff --git a/tests/helpers/python/frrtest.py b/tests/helpers/python/frrtest.py index 60bee5c88c..0ac54fd900 100644 --- a/tests/helpers/python/frrtest.py +++ b/tests/helpers/python/frrtest.py @@ -39,35 +39,41 @@ import frrsix srcbase = os.path.abspath(inspect.getsourcefile(frrsix)) for i in range(0, 3): srcbase = os.path.dirname(srcbase) + + def binpath(srcpath): return os.path.relpath(os.path.abspath(srcpath), srcbase) + class MultiTestFailure(Exception): pass + class MetaTestMultiOut(type): def __getattr__(cls, name): - if name.startswith('_'): + if name.startswith("_"): raise AttributeError - internal_name = '_{}'.format(name) + internal_name = "_{}".format(name) if internal_name not in dir(cls): raise AttributeError def registrar(*args, **kwargs): - 
diff --git a/tests/helpers/python/frrtest.py b/tests/helpers/python/frrtest.py
index 60bee5c88c..0ac54fd900 100644
--- a/tests/helpers/python/frrtest.py
+++ b/tests/helpers/python/frrtest.py
@@ -39,35 +39,41 @@ import frrsix
 srcbase = os.path.abspath(inspect.getsourcefile(frrsix))
 for i in range(0, 3):
     srcbase = os.path.dirname(srcbase)
+
+
 def binpath(srcpath):
     return os.path.relpath(os.path.abspath(srcpath), srcbase)
 
+
 class MultiTestFailure(Exception):
     pass
 
+
 class MetaTestMultiOut(type):
     def __getattr__(cls, name):
-        if name.startswith('_'):
+        if name.startswith("_"):
            raise AttributeError
 
-        internal_name = '_{}'.format(name)
+        internal_name = "_{}".format(name)
         if internal_name not in dir(cls):
             raise AttributeError
 
         def registrar(*args, **kwargs):
-            cls._add_test(getattr(cls,internal_name), *args, **kwargs)
+            cls._add_test(getattr(cls, internal_name), *args, **kwargs)
+
        return registrar
 
+
 @frrsix.add_metaclass(MetaTestMultiOut)
 class _TestMultiOut(object):
     def _run_tests(self):
-        if 'tests_run' in dir(self.__class__) and self.tests_run:
+        if "tests_run" in dir(self.__class__) and self.tests_run:
             return
         self.__class__.tests_run = True
 
         basedir = os.path.dirname(inspect.getsourcefile(type(self)))
         program = os.path.join(basedir, self.program)
         proc = subprocess.Popen([binpath(program)], stdout=subprocess.PIPE)
-        self.output,_ = proc.communicate('')
+        self.output, _ = proc.communicate("")
         self.exitcode = proc.wait()
 
         self.__class__.testresults = {}
@@ -85,13 +91,14 @@ class _TestMultiOut(object):
 
     @classmethod
     def _add_test(cls, method, *args, **kwargs):
-        if 'tests' not in dir(cls):
-            setattr(cls,'tests',[])
+        if "tests" not in dir(cls):
+            setattr(cls, "tests", [])
             if method is not cls._exit_cleanly:
                 cls._add_test(cls._exit_cleanly)
 
         def matchfunction(self):
             method(self, *args, **kwargs)
+
         cls.tests.append(matchfunction)
 
         def testfunction(self):
@@ -100,17 +107,18 @@ class _TestMultiOut(object):
             if result is not None:
                 frrsix.reraise(*result)
 
-        testname = re.sub(r'[^A-Za-z0-9]', '_', '%r%r' % (args, kwargs))
-        testname = re.sub(r'__*', '_', testname)
-        testname = testname.strip('_')
+        testname = re.sub(r"[^A-Za-z0-9]", "_", "%r%r" % (args, kwargs))
+        testname = re.sub(r"__*", "_", testname)
+        testname = testname.strip("_")
         if not testname:
-            testname = method.__name__.strip('_')
+            testname = method.__name__.strip("_")
         if "test_%s" % testname in dir(cls):
             index = 2
-            while "test_%s_%d" % (testname,index) in dir(cls):
+            while "test_%s_%d" % (testname, index) in dir(cls):
                 index += 1
             testname = "%s_%d" % (testname, index)
-        setattr(cls,"test_%s" % testname, testfunction)
+        setattr(cls, "test_%s" % testname, testfunction)
+
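Tying the pieces together: a class-level call like `TestTypelist.onesimple("LIST end")` (several such registrations appear later in this patch) misses normal attribute lookup, so `MetaTestMultiOut.__getattr__` resolves it to the private `_onesimple` matcher and returns `registrar`, which stamps a fresh `test_*` function onto the class; pytest then discovers one test per registered line. A compressed re-creation of the pattern, using the Python 3 `metaclass=` spelling rather than the patch's `add_metaclass` decorator; `TestFoo` and its canned `output` are stand-ins, not the frrtest API:

    import re

    class MetaDemo(type):
        def __getattr__(cls, name):
            if name.startswith("_"):
                raise AttributeError(name)
            method = cls.__dict__["_" + name]

            def registrar(*args):
                # Mangle the arguments into a test name, as frrtest does.
                testname = re.sub(r"[^A-Za-z0-9]", "_", "%r" % (args,))
                testname = re.sub(r"__*", "_", testname).strip("_")
                setattr(cls, "test_%s" % testname,
                        lambda self: method(self, *args))

            return registrar

    class TestFoo(object, metaclass=MetaDemo):
        output = "LIST end"

        def _onesimple(self, line):
            assert line in self.output

    TestFoo.onesimple("LIST end")  # registers TestFoo.test_LIST_end
    TestFoo().test_LIST_end()      # passes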
 
 #
 # This class houses the actual TestMultiOut tests types.
 # modified according to consumed content.
 #
 
-re_okfail = re.compile(r'(?:\x1b\[3[12]m|^)?(?P<ret>OK|failed)'.encode('utf8'),
-                       re.MULTILINE)
+re_okfail = re.compile(r"(?:\x1b\[3[12]m|^)?(?P<ret>OK|failed)".encode("utf8"), re.MULTILINE)
+
+
 class TestMultiOut(_TestMultiOut):
     def _onesimple(self, line):
         if type(line) is str:
-            line = line.encode('utf8')
+            line = line.encode("utf8")
         idx = self.output.find(line)
         if idx != -1:
-            self.output = self.output[idx+len(line):]
+            self.output = self.output[idx + len(line) :]
         else:
             raise MultiTestFailure("%r could not be found" % line)
 
@@ -144,58 +153,67 @@ class TestMultiOut(_TestMultiOut):
 
         m = okfail.search(self.output)
         if m is None:
-            raise MultiTestFailure('OK/fail not found')
-        self.output = self.output[m.end():]
+            raise MultiTestFailure("OK/fail not found")
+        self.output = self.output[m.end() :]
+
+        if m.group("ret") != "OK".encode("utf8"):
+            raise MultiTestFailure("Test output indicates failure")
 
-        if m.group('ret') != 'OK'.encode('utf8'):
-            raise MultiTestFailure('Test output indicates failure')
 
 #
 # This class implements a test comparing the output of a program against
 # an existing reference output
 #
 
+
 class TestRefMismatch(Exception):
     def __init__(self, _test, outtext, reftext):
-        self.outtext = outtext.decode('utf8') if type(outtext) is bytes else outtext
-        self.reftext = reftext.decode('utf8') if type(reftext) is bytes else reftext
+        self.outtext = outtext.decode("utf8") if type(outtext) is bytes else outtext
+        self.reftext = reftext.decode("utf8") if type(reftext) is bytes else reftext
 
     def __str__(self):
-        rv = 'Expected output and actual output differ:\n'
-        rv += '\n'.join(difflib.unified_diff(self.reftext.splitlines(),
-                                             self.outtext.splitlines(),
-                                             'outtext', 'reftext',
-                                             lineterm=''))
+        rv = "Expected output and actual output differ:\n"
+        rv += "\n".join(
+            difflib.unified_diff(
+                self.reftext.splitlines(),
+                self.outtext.splitlines(),
+                "outtext",
+                "reftext",
+                lineterm="",
+            )
+        )
         return rv
 
+
 class TestExitNonzero(Exception):
     pass
 
+
 class TestRefOut(object):
     def test_refout(self):
         basedir = os.path.dirname(inspect.getsourcefile(type(self)))
         program = os.path.join(basedir, self.program)
 
-        if getattr(self, 'built_refin', False):
-            refin = binpath(program) + '.in'
+        if getattr(self, "built_refin", False):
+            refin = binpath(program) + ".in"
         else:
-            refin = program + '.in'
-        if getattr(self, 'built_refout', False):
-            refout = binpath(program) + '.refout'
+            refin = program + ".in"
+        if getattr(self, "built_refout", False):
+            refout = binpath(program) + ".refout"
         else:
-            refout = program + '.refout'
+            refout = program + ".refout"
 
-        intext = ''
+        intext = ""
         if os.path.exists(refin):
-            with open(refin, 'rb') as f:
+            with open(refin, "rb") as f:
                 intext = f.read()
-        with open(refout, 'rb') as f:
+        with open(refout, "rb") as f:
             reftext = f.read()
 
-        proc = subprocess.Popen([binpath(program)],
-                                stdin=subprocess.PIPE,
-                                stdout=subprocess.PIPE)
-        outtext,_ = proc.communicate(intext)
+        proc = subprocess.Popen(
+            [binpath(program)], stdin=subprocess.PIPE, stdout=subprocess.PIPE
+        )
+        outtext, _ = proc.communicate(intext)
         if outtext != reftext:
             raise TestRefMismatch(self, outtext, reftext)
         if proc.wait() != 0:
diff --git a/tests/isisd/test_fuzz_isis_tlv.py b/tests/isisd/test_fuzz_isis_tlv.py
index d96e3c4fee..8fd20aabd1 100644
--- a/tests/isisd/test_fuzz_isis_tlv.py
+++ b/tests/isisd/test_fuzz_isis_tlv.py
@@ -9,18 +9,24 @@ import socket
 # on musl, ntop compresses a single :0: -> :: which is against RFC
 ##
 def inet_ntop_broken():
-    addr = '1:2:3:4:0:6:7:8'
-    return socket.inet_ntop(socket.AF_INET6,
-
socket.inet_pton(socket.AF_INET6, addr)) != addr + addr = "1:2:3:4:0:6:7:8" + return ( + socket.inet_ntop(socket.AF_INET6, socket.inet_pton(socket.AF_INET6, addr)) + != addr + ) -if platform.uname()[0] == 'SunOS' or inet_ntop_broken(): +if platform.uname()[0] == "SunOS" or inet_ntop_broken(): + class TestFuzzIsisTLV: - @pytest.mark.skipif(True, reason='Test unsupported') + @pytest.mark.skipif(True, reason="Test unsupported") def test_exit_cleanly(self): pass + + else: + class TestFuzzIsisTLV(frrtest.TestMultiOut): - program = './test_fuzz_isis_tlv' + program = "./test_fuzz_isis_tlv" TestFuzzIsisTLV.exit_cleanly() diff --git a/tests/isisd/test_isis_lspdb.py b/tests/isisd/test_isis_lspdb.py index cd0b5345c7..280ed1c328 100644 --- a/tests/isisd/test_isis_lspdb.py +++ b/tests/isisd/test_isis_lspdb.py @@ -1,6 +1,8 @@ import frrtest + class TestIsisLSPDB(frrtest.TestMultiOut): - program = './test_isis_lspdb' + program = "./test_isis_lspdb" + TestIsisLSPDB.exit_cleanly() diff --git a/tests/isisd/test_isis_spf.py b/tests/isisd/test_isis_spf.py index 21e7ef6269..f44fa70d6b 100644 --- a/tests/isisd/test_isis_spf.py +++ b/tests/isisd/test_isis_spf.py @@ -1,4 +1,5 @@ import frrtest + class TestIsisSPF(frrtest.TestRefOut): - program = './test_isis_spf' + program = "./test_isis_spf" diff --git a/tests/isisd/test_isis_vertex_queue.py b/tests/isisd/test_isis_vertex_queue.py index 5974edecc9..b9d2fc5fa2 100644 --- a/tests/isisd/test_isis_vertex_queue.py +++ b/tests/isisd/test_isis_vertex_queue.py @@ -1,6 +1,8 @@ import frrtest + class TestIsisVertexQueue(frrtest.TestMultiOut): - program = './test_isis_vertex_queue' + program = "./test_isis_vertex_queue" + TestIsisVertexQueue.exit_cleanly() diff --git a/tests/lib/cli/test_cli.py b/tests/lib/cli/test_cli.py index 7371db283a..6fdd6faa65 100644 --- a/tests/lib/cli/test_cli.py +++ b/tests/lib/cli/test_cli.py @@ -1,5 +1,6 @@ import frrtest + class TestCli(frrtest.TestRefOut): - program = './test_cli' + program = "./test_cli" built_refout = True diff --git a/tests/lib/cli/test_commands.py b/tests/lib/cli/test_commands.py index d55345186a..cf99077c35 100644 --- a/tests/lib/cli/test_commands.py +++ b/tests/lib/cli/test_commands.py @@ -2,10 +2,12 @@ import frrtest import pytest import os -class TestCommands(frrtest.TestRefOut): - program = './test_commands' - @pytest.mark.skipif('QUAGGA_TEST_COMMANDS' not in os.environ, - reason='QUAGGA_TEST_COMMANDS not set') +class TestCommands(frrtest.TestRefOut): + program = "./test_commands" + + @pytest.mark.skipif( + "QUAGGA_TEST_COMMANDS" not in os.environ, reason="QUAGGA_TEST_COMMANDS not set" + ) def test_refout(self): return super(TestCommands, self).test_refout() diff --git a/tests/lib/northbound/test_oper_data.py b/tests/lib/northbound/test_oper_data.py index 8f5fdd6fd0..a02bf05c1a 100644 --- a/tests/lib/northbound/test_oper_data.py +++ b/tests/lib/northbound/test_oper_data.py @@ -1,4 +1,5 @@ import frrtest + class TestNbOperData(frrtest.TestRefOut): - program = './test_oper_data' + program = "./test_oper_data" diff --git a/tests/lib/test_atomlist.py b/tests/lib/test_atomlist.py index 293d47f316..719a2e791d 100644 --- a/tests/lib/test_atomlist.py +++ b/tests/lib/test_atomlist.py @@ -1,6 +1,8 @@ import frrtest + class TestAtomlist(frrtest.TestMultiOut): - program = './test_atomlist' + program = "./test_atomlist" + TestAtomlist.exit_cleanly() diff --git a/tests/lib/test_graph.py b/tests/lib/test_graph.py index 697e56c149..b26986c83c 100644 --- a/tests/lib/test_graph.py +++ b/tests/lib/test_graph.py @@ -1,4 +1,5 @@ import 
frrtest + class TestGraph(frrtest.TestRefOut): - program = './test_graph' + program = "./test_graph" diff --git a/tests/lib/test_idalloc.py b/tests/lib/test_idalloc.py index 22de082be4..e2186dc521 100644 --- a/tests/lib/test_idalloc.py +++ b/tests/lib/test_idalloc.py @@ -1,6 +1,8 @@ import frrtest -class TestIDAlloc(frrtest.TestMultiOut): - program = './test_idalloc' -TestIDAlloc.onesimple('ID Allocator test successful.') +class TestIDAlloc(frrtest.TestMultiOut): + program = "./test_idalloc" + + +TestIDAlloc.onesimple("ID Allocator test successful.") diff --git a/tests/lib/test_nexthop_iter.py b/tests/lib/test_nexthop_iter.py index bb330a1c75..0c39dce08e 100644 --- a/tests/lib/test_nexthop_iter.py +++ b/tests/lib/test_nexthop_iter.py @@ -1,7 +1,9 @@ import frrtest -class TestNexthopIter(frrtest.TestMultiOut): - program = './test_nexthop_iter' -TestNexthopIter.onesimple('Simple test passed.') -TestNexthopIter.onesimple('PRNG test passed.') +class TestNexthopIter(frrtest.TestMultiOut): + program = "./test_nexthop_iter" + + +TestNexthopIter.onesimple("Simple test passed.") +TestNexthopIter.onesimple("PRNG test passed.") diff --git a/tests/lib/test_ntop.py b/tests/lib/test_ntop.py index 2526f53db5..69c4353620 100644 --- a/tests/lib/test_ntop.py +++ b/tests/lib/test_ntop.py @@ -1,6 +1,8 @@ import frrtest + class TestNtop(frrtest.TestMultiOut): - program = './test_ntop' + program = "./test_ntop" + TestNtop.exit_cleanly() diff --git a/tests/lib/test_prefix2str.py b/tests/lib/test_prefix2str.py index 6e26d1b409..fd883ed530 100644 --- a/tests/lib/test_prefix2str.py +++ b/tests/lib/test_prefix2str.py @@ -1,6 +1,8 @@ import frrtest + class TestPrefix2str(frrtest.TestMultiOut): - program = './test_prefix2str' + program = "./test_prefix2str" + TestPrefix2str.exit_cleanly() diff --git a/tests/lib/test_printfrr.py b/tests/lib/test_printfrr.py index 4fe238618e..b8ab89e337 100644 --- a/tests/lib/test_printfrr.py +++ b/tests/lib/test_printfrr.py @@ -1,6 +1,8 @@ import frrtest + class TestPrintfrr(frrtest.TestMultiOut): - program = './test_printfrr' + program = "./test_printfrr" + TestPrintfrr.exit_cleanly() diff --git a/tests/lib/test_ringbuf.py b/tests/lib/test_ringbuf.py index 5d994ddd7b..0cd9dee2b7 100644 --- a/tests/lib/test_ringbuf.py +++ b/tests/lib/test_ringbuf.py @@ -1,6 +1,8 @@ import frrtest + class TestRingbuf(frrtest.TestMultiOut): - program = './test_ringbuf' + program = "./test_ringbuf" + TestRingbuf.exit_cleanly() diff --git a/tests/lib/test_srcdest_table.py b/tests/lib/test_srcdest_table.py index ee73121025..d0dde6a8e5 100644 --- a/tests/lib/test_srcdest_table.py +++ b/tests/lib/test_srcdest_table.py @@ -1,6 +1,8 @@ import frrtest -class TestSrcdestTable(frrtest.TestMultiOut): - program = './test_srcdest_table' -TestSrcdestTable.onesimple('PRNG Test successful.') +class TestSrcdestTable(frrtest.TestMultiOut): + program = "./test_srcdest_table" + + +TestSrcdestTable.onesimple("PRNG Test successful.") diff --git a/tests/lib/test_stream.py b/tests/lib/test_stream.py index 6f42db1839..11d902eb95 100644 --- a/tests/lib/test_stream.py +++ b/tests/lib/test_stream.py @@ -1,4 +1,5 @@ import frrtest + class TestStream(frrtest.TestRefOut): - program = './test_stream' + program = "./test_stream" diff --git a/tests/lib/test_table.py b/tests/lib/test_table.py index e724421273..ee1849fd86 100644 --- a/tests/lib/test_table.py +++ b/tests/lib/test_table.py @@ -1,10 +1,12 @@ import frrtest + class TestTable(frrtest.TestMultiOut): - program = './test_table' + program = "./test_table" + for i in range(6): - 
TestTable.onesimple('Verifying cmp') + TestTable.onesimple("Verifying cmp") for i in range(11): - TestTable.onesimple('Verifying successor') -TestTable.onesimple('Verified pausing') + TestTable.onesimple("Verifying successor") +TestTable.onesimple("Verified pausing") diff --git a/tests/lib/test_timer_correctness.py b/tests/lib/test_timer_correctness.py index 8b4a765a81..71f45f980c 100644 --- a/tests/lib/test_timer_correctness.py +++ b/tests/lib/test_timer_correctness.py @@ -1,6 +1,8 @@ import frrtest -class TestTimerCorrectness(frrtest.TestMultiOut): - program = './test_timer_correctness' -TestTimerCorrectness.onesimple('Expected output and actual output match.') +class TestTimerCorrectness(frrtest.TestMultiOut): + program = "./test_timer_correctness" + + +TestTimerCorrectness.onesimple("Expected output and actual output match.") diff --git a/tests/lib/test_ttable.py b/tests/lib/test_ttable.py index 1d93932ad2..9151181a72 100644 --- a/tests/lib/test_ttable.py +++ b/tests/lib/test_ttable.py @@ -1,4 +1,5 @@ import frrtest + class TestTTable(frrtest.TestRefOut): - program = './test_ttable' + program = "./test_ttable" diff --git a/tests/lib/test_typelist.py b/tests/lib/test_typelist.py index 0b3c743971..fe3499cad8 100644 --- a/tests/lib/test_typelist.py +++ b/tests/lib/test_typelist.py @@ -1,19 +1,21 @@ import frrtest -class TestTypelist(frrtest.TestMultiOut): - program = './test_typelist' -TestTypelist.onesimple('LIST end') -TestTypelist.onesimple('DLIST end') -TestTypelist.onesimple('ATOMLIST end') -TestTypelist.onesimple('HEAP end') -TestTypelist.onesimple('SORTLIST_UNIQ end') -TestTypelist.onesimple('SORTLIST_NONUNIQ end') -TestTypelist.onesimple('HASH end') -TestTypelist.onesimple('HASH_collisions end') -TestTypelist.onesimple('SKIPLIST_UNIQ end') -TestTypelist.onesimple('SKIPLIST_NONUNIQ end') -TestTypelist.onesimple('RBTREE_UNIQ end') -TestTypelist.onesimple('RBTREE_NONUNIQ end') -TestTypelist.onesimple('ATOMSORT_UNIQ end') -TestTypelist.onesimple('ATOMSORT_NONUNIQ end') +class TestTypelist(frrtest.TestMultiOut): + program = "./test_typelist" + + +TestTypelist.onesimple("LIST end") +TestTypelist.onesimple("DLIST end") +TestTypelist.onesimple("ATOMLIST end") +TestTypelist.onesimple("HEAP end") +TestTypelist.onesimple("SORTLIST_UNIQ end") +TestTypelist.onesimple("SORTLIST_NONUNIQ end") +TestTypelist.onesimple("HASH end") +TestTypelist.onesimple("HASH_collisions end") +TestTypelist.onesimple("SKIPLIST_UNIQ end") +TestTypelist.onesimple("SKIPLIST_NONUNIQ end") +TestTypelist.onesimple("RBTREE_UNIQ end") +TestTypelist.onesimple("RBTREE_NONUNIQ end") +TestTypelist.onesimple("ATOMSORT_UNIQ end") +TestTypelist.onesimple("ATOMSORT_NONUNIQ end") diff --git a/tests/lib/test_versioncmp.py b/tests/lib/test_versioncmp.py index 0990757000..8ded53bd58 100644 --- a/tests/lib/test_versioncmp.py +++ b/tests/lib/test_versioncmp.py @@ -1,6 +1,8 @@ import frrtest + class TestVersionCmp(frrtest.TestMultiOut): - program = './test_versioncmp' + program = "./test_versioncmp" + TestVersionCmp.exit_cleanly() diff --git a/tests/lib/test_zlog.py b/tests/lib/test_zlog.py index 2ca2585886..2a2d54e204 100644 --- a/tests/lib/test_zlog.py +++ b/tests/lib/test_zlog.py @@ -1,4 +1,5 @@ import frrtest + class TestZlog(frrtest.TestMultiOut): - program = './test_zlog' + program = "./test_zlog" diff --git a/tests/lib/test_zmq.py b/tests/lib/test_zmq.py index 1f8ee54169..5f6189d919 100644 --- a/tests/lib/test_zmq.py +++ b/tests/lib/test_zmq.py @@ -2,10 +2,13 @@ import frrtest import pytest import os -class 
TestZMQ(frrtest.TestRefOut): - program = './test_zmq' - @pytest.mark.skipif('S["ZEROMQ_TRUE"]=""\n' not in open('../config.status').readlines(), - reason='ZEROMQ not enabled') +class TestZMQ(frrtest.TestRefOut): + program = "./test_zmq" + + @pytest.mark.skipif( + 'S["ZEROMQ_TRUE"]=""\n' not in open("../config.status").readlines(), + reason="ZEROMQ not enabled", + ) def test_refout(self): return super(TestZMQ, self).test_refout() diff --git a/tests/ospf6d/test_lsdb.py b/tests/ospf6d/test_lsdb.py index 6a94395113..6ada617657 100644 --- a/tests/ospf6d/test_lsdb.py +++ b/tests/ospf6d/test_lsdb.py @@ -1,4 +1,5 @@ import frrtest + class TestLSDB(frrtest.TestRefOut): - program = './test_lsdb' + program = "./test_lsdb" diff --git a/tests/runtests.py b/tests/runtests.py index 533dc6b167..4677796152 100644 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -2,5 +2,5 @@ import pytest import sys import os -sys.path.append(os.path.join(os.path.dirname(__file__), 'helpers','python')) +sys.path.append(os.path.join(os.path.dirname(__file__), "helpers", "python")) raise SystemExit(pytest.main(sys.argv[1:])) diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py index 4b57928366..0254ff6af6 100644 --- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py +++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py @@ -55,6 +55,7 @@ fatal_error = "" ## ##################################################### + class NetworkTopo(Topo): "All Protocol Startup Test" @@ -64,15 +65,15 @@ class NetworkTopo(Topo): router = {} # # Setup Main Router - router[1] = topotest.addRouter(self, 'r1') + router[1] = topotest.addRouter(self, "r1") # # Setup Switches switch = {} # for i in range(0, 10): - switch[i] = self.addSwitch('sw%s' % i, cls=topotest.LegacySwitch) - self.addLink(switch[i], router[1], intfName2='r1-eth%s' % i ) + switch[i] = self.addSwitch("sw%s" % i, cls=topotest.LegacySwitch) + self.addLink(switch[i], router[1], intfName2="r1-eth%s" % i) ##################################################### @@ -81,6 +82,7 @@ class NetworkTopo(Topo): ## ##################################################### + def setup_module(module): global topo, net global fatal_error @@ -89,8 +91,8 @@ def setup_module(module): print("******************************************\n") print("Cleanup old Mininet runs") - os.system('sudo mn -c > /dev/null 2>&1') - os.system('sudo rm /tmp/r* > /dev/null 2>&1') + os.system("sudo mn -c > /dev/null 2>&1") + os.system("sudo rm /tmp/r* > /dev/null 2>&1") thisDir = os.path.dirname(os.path.realpath(__file__)) topo = NetworkTopo() @@ -98,33 +100,35 @@ def setup_module(module): net = Mininet(controller=None, topo=topo) net.start() - if net['r1'].get_routertype() != 'frr': + if net["r1"].get_routertype() != "frr": fatal_error = "Test is only implemented for FRR" - sys.stderr.write('\n\nTest is only implemented for FRR - Skipping\n\n') + sys.stderr.write("\n\nTest is only implemented for FRR - Skipping\n\n") pytest.skip(fatal_error) - + # Starting Routers # # Main router for i in range(1, 2): - net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ripd', '%s/r%s/ripd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ripngd', '%s/r%s/ripngd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('ospfd', '%s/r%s/ospfd.conf' % (thisDir, i)) - if net['r1'].checkRouterVersion('<', '4.0'): - net['r%s' % i].loadConf('ospf6d', '%s/r%s/ospf6d.conf-pre-v4' % (thisDir, 
i)) + net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("ospfd", "%s/r%s/ospfd.conf" % (thisDir, i)) + if net["r1"].checkRouterVersion("<", "4.0"): + net["r%s" % i].loadConf( + "ospf6d", "%s/r%s/ospf6d.conf-pre-v4" % (thisDir, i) + ) else: - net['r%s' % i].loadConf('ospf6d', '%s/r%s/ospf6d.conf' % (thisDir, i)) - net['r%s' % i].loadConf('isisd', '%s/r%s/isisd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('bgpd', '%s/r%s/bgpd.conf' % (thisDir, i)) - if net['r%s' % i].daemon_available('ldpd'): + net["r%s" % i].loadConf("ospf6d", "%s/r%s/ospf6d.conf" % (thisDir, i)) + net["r%s" % i].loadConf("isisd", "%s/r%s/isisd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("bgpd", "%s/r%s/bgpd.conf" % (thisDir, i)) + if net["r%s" % i].daemon_available("ldpd"): # Only test LDPd if it's installed and Kernel >= 4.5 - net['r%s' % i].loadConf('ldpd', '%s/r%s/ldpd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('sharpd') - net['r%s' % i].loadConf('nhrpd', '%s/r%s/nhrpd.conf' % (thisDir, i)) - net['r%s' % i].loadConf('babeld', '%s/r%s/babeld.conf' % (thisDir, i)) - net['r%s' % i].loadConf('pbrd', '%s/r%s/pbrd.conf' % (thisDir, i)) - net['r%s' % i].startRouter() + net["r%s" % i].loadConf("ldpd", "%s/r%s/ldpd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("sharpd") + net["r%s" % i].loadConf("nhrpd", "%s/r%s/nhrpd.conf" % (thisDir, i)) + net["r%s" % i].loadConf("babeld", "%s/r%s/babeld.conf" % (thisDir, i)) + net["r%s" % i].loadConf("pbrd", "%s/r%s/pbrd.conf" % (thisDir, i)) + net["r%s" % i].startRouter() # For debugging after starting FRR daemons, uncomment the next line # CLI(net) @@ -145,7 +149,7 @@ def test_router_running(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check if FRR is running on each Router node") @@ -154,7 +158,7 @@ def test_router_running(): # Starting Routers for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -166,7 +170,7 @@ def test_error_messages_vtysh(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check for error messages on VTYSH") @@ -179,38 +183,38 @@ def test_error_messages_vtysh(): # # VTYSH output from router - vtystdout = net['r%s' % i].cmd('vtysh -c "show version" 2> /dev/null').rstrip() + vtystdout = net["r%s" % i].cmd('vtysh -c "show version" 2> /dev/null').rstrip() # Fix newlines (make them all the same) - vtystdout = ('\n'.join(vtystdout.splitlines()) + '\n').rstrip() + vtystdout = ("\n".join(vtystdout.splitlines()) + "\n").rstrip() # Drop everything starting with "FRRouting X.xx" message vtystdout = re.sub(r"FRRouting [0-9]+.*", "", vtystdout, flags=re.DOTALL) - if (vtystdout == ''): + if vtystdout == "": print("r%s StdOut ok" % i) - assert vtystdout == '', "Vtysh StdOut Output check failed for router r%s" % i + assert vtystdout == "", "Vtysh StdOut Output check failed for router r%s" % i # # Second checking Standard Error # # VTYSH StdErr output from router - vtystderr = net['r%s' % i].cmd('vtysh -c "show version" > /dev/null').rstrip() + vtystderr = net["r%s" % i].cmd('vtysh -c "show version" > 
/dev/null').rstrip() # Fix newlines (make them all the same) - vtystderr = ('\n'.join(vtystderr.splitlines()) + '\n').rstrip() + vtystderr = ("\n".join(vtystderr.splitlines()) + "\n").rstrip() # # Drop everything starting with "FRRouting X.xx" message - # vtystderr = re.sub(r"FRRouting [0-9]+.*", "", vtystderr, flags=re.DOTALL) + # vtystderr = re.sub(r"FRRouting [0-9]+.*", "", vtystderr, flags=re.DOTALL) - if (vtystderr == ''): + if vtystderr == "": print("r%s StdErr ok" % i) - assert vtystderr == '', "Vtysh StdErr Output check failed for router r%s" % i + assert vtystderr == "", "Vtysh StdErr Output check failed for router r%s" % i # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -222,7 +226,7 @@ def test_error_messages_daemons(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Check for error messages in daemons") @@ -231,67 +235,73 @@ def test_error_messages_daemons(): error_logs = "" for i in range(1, 2): - log = net['r%s' % i].getStdErr('ripd') + log = net["r%s" % i].getStdErr("ripd") if log: error_logs += "r%s RIPd StdErr Output:\n" % i error_logs += log - log = net['r%s' % i].getStdErr('ripngd') + log = net["r%s" % i].getStdErr("ripngd") if log: error_logs += "r%s RIPngd StdErr Output:\n" % i error_logs += log - log = net['r%s' % i].getStdErr('ospfd') + log = net["r%s" % i].getStdErr("ospfd") if log: error_logs += "r%s OSPFd StdErr Output:\n" % i error_logs += log - log = net['r%s' % i].getStdErr('ospf6d') + log = net["r%s" % i].getStdErr("ospf6d") if log: error_logs += "r%s OSPF6d StdErr Output:\n" % i error_logs += log - log = net['r%s' % i].getStdErr('isisd') + log = net["r%s" % i].getStdErr("isisd") # ISIS shows debugging enabled status on StdErr # Remove these messages log = re.sub(r"^IS-IS .* debugging is on.*", "", log).rstrip() if log: error_logs += "r%s ISISd StdErr Output:\n" % i error_logs += log - log = net['r%s' % i].getStdErr('bgpd') + log = net["r%s" % i].getStdErr("bgpd") if log: error_logs += "r%s BGPd StdErr Output:\n" % i error_logs += log - if (net['r%s' % i].daemon_available('ldpd')): - log = net['r%s' % i].getStdErr('ldpd') + if net["r%s" % i].daemon_available("ldpd"): + log = net["r%s" % i].getStdErr("ldpd") if log: error_logs += "r%s LDPd StdErr Output:\n" % i error_logs += log - log = net['r1'].getStdErr('nhrpd') + log = net["r1"].getStdErr("nhrpd") if log: error_logs += "r%s NHRPd StdErr Output:\n" % i error_logs += log - log = net['r1'].getStdErr('babeld') + log = net["r1"].getStdErr("babeld") if log: error_logs += "r%s BABELd StdErr Output:\n" % i error_logs += log - log = net['r1'].getStdErr('pbrd') + log = net["r1"].getStdErr("pbrd") if log: error_logs += "r%s PBRd StdErr Output:\n" % i error_logs += log - log = net['r%s' % i].getStdErr('zebra') + log = net["r%s" % i].getStdErr("zebra") if log: error_logs += "r%s Zebra StdErr Output:\n" error_logs += log if error_logs: - sys.stderr.write('Failed check for StdErr Output on daemons:\n%s\n' % error_logs) + sys.stderr.write( + "Failed check for StdErr Output on daemons:\n%s\n" % error_logs + ) # Ignoring the issue if told to ignore (ie not yet fixed) - if (error_logs != ""): - if (os.environ.get('bamboo_TOPOTESTS_ISSUE_349') == "IGNORE"): - sys.stderr.write('Known issue - IGNORING. 
See https://github.com/FRRouting/frr/issues/349\n') - pytest.skip('Known issue - IGNORING. See https://github.com/FRRouting/frr/issues/349') + if error_logs != "": + if os.environ.get("bamboo_TOPOTESTS_ISSUE_349") == "IGNORE": + sys.stderr.write( + "Known issue - IGNORING. See https://github.com/FRRouting/frr/issues/349\n" + ) + pytest.skip( + "Known issue - IGNORING. See https://github.com/FRRouting/frr/issues/349" + ) assert error_logs == "", "Daemons report errors to StdErr" @@ -304,7 +314,7 @@ def test_converge_protocols(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -318,62 +328,84 @@ def test_converge_protocols(): # Make sure that all daemons are running failures = 0 for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error - print("Show that v4 routes are right\n"); - v4_routesFile = '%s/r%s/ipv4_routes.ref' % (thisDir, i) + print("Show that v4 routes are right\n") + v4_routesFile = "%s/r%s/ipv4_routes.ref" % (thisDir, i) expected = open(v4_routesFile).read().rstrip() - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) - actual = net['r%s' %i].cmd('vtysh -c "show ip route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd( + 'vtysh -c "show ip route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null' + ) + .rstrip() + ) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) - diff = topotest.get_textdiff(actual, expected, - title1="Actual IP Routing Table", - title2="Expected IP RoutingTable") + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) + diff = topotest.get_textdiff( + actual, + expected, + title1="Actual IP Routing Table", + title2="Expected IP RoutingTable", + ) if diff: - sys.stderr.write('r%s failed IP Routing table check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed IP Routing table check:\n%s\n" % (i, diff)) failures += 1 else: - print("r%s ok" %i) + print("r%s ok" % i) assert failures == 0, "IP Routing table failed for r%s\n%s" % (i, diff) failures = 0 print("Show that v6 routes are right\n") - v6_routesFile = '%s/r%s/ipv6_routes.ref' % (thisDir, i) + v6_routesFile = "%s/r%s/ipv6_routes.ref" % (thisDir, i) expected = open(v6_routesFile).read().rstrip() - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) - actual = net['r%s' %i].cmd('vtysh -c "show ipv6 route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd( + 'vtysh -c "show ipv6 route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null' + ) + .rstrip() + ) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) - diff = topotest.get_textdiff(actual, expected, - title1="Actual IPv6 Routing Table", - title2="Expected IPv6 RoutingTable") + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) + diff = topotest.get_textdiff( + actual, + expected, + title1="Actual IPv6 Routing Table", + 
title2="Expected IPv6 RoutingTable", + ) if diff: - sys.stderr.write('r%s failed IPv6 Routing table check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed IPv6 Routing table check:\n%s\n" % (i, diff)) failures += 1 else: - print("r%s ok" %i) + print("r%s ok" % i) assert failures == 0, "IPv6 Routing table failed for r%s\n%s" % (i, diff) # For debugging after starting FRR daemons, uncomment the next line ## CLI(net) + def route_get_nhg_id(route_str): output = net["r1"].cmd('vtysh -c "show ip route %s nexthop-group"' % route_str) match = re.search(r"Nexthop Group ID: (\d+)", output) - assert match is not None, "Nexthop Group ID not found for sharpd route %s" % route_str + assert match is not None, ( + "Nexthop Group ID not found for sharpd route %s" % route_str + ) nhg_id = int(match.group(1)) return nhg_id + def verify_nexthop_group(nhg_id, recursive=False, ecmp=0): # Verify NHG is valid/installed output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id) @@ -389,10 +421,14 @@ def verify_nexthop_group(nhg_id, recursive=False, ecmp=0): depends = re.findall(r"\((\d+)\)", match.group(0)) if ecmp: - assert (len(depends) == ecmp), "Nexthop Group ID=%d doesn't match ecmp size" % nhg_id + assert len(depends) == ecmp, ( + "Nexthop Group ID=%d doesn't match ecmp size" % nhg_id + ) else: # If recursive, we need to look at its resolved group - assert (len(depends) == 1), "Nexthop Group ID=%d should only have one recursive depend" % nhg_id + assert len(depends) == 1, ( + "Nexthop Group ID=%d should only have one recursive depend" % nhg_id + ) resolved_id = int(depends[0]) verify_nexthop_group(resolved_id, False) @@ -400,17 +436,19 @@ def verify_nexthop_group(nhg_id, recursive=False, ecmp=0): match = re.search(r"Installed", output) assert match is not None, "Nexthop Group ID=%d not marked Installed" % nhg_id + def verify_route_nexthop_group(route_str, recursive=False, ecmp=0): # Verify route and that zebra created NHGs for and they are valid/installed nhg_id = route_get_nhg_id(route_str) verify_nexthop_group(nhg_id, recursive, ecmp) + def test_nexthop_groups(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Verifying Nexthop Groups") @@ -421,7 +459,9 @@ def test_nexthop_groups(): ## Basic test # Create a lib nexthop-group - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group basic" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group basic" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"' + ) # Create with sharpd using nexthop-group net["r1"].cmd('vtysh -c "sharp install routes 2.2.2.1 nexthop-group basic 1"') @@ -430,7 +470,9 @@ def test_nexthop_groups(): ## Connected - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group connected" -c "nexthop r1-eth1" -c "nexthop r1-eth2"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group connected" -c "nexthop r1-eth1" -c "nexthop r1-eth2"' + ) net["r1"].cmd('vtysh -c "sharp install routes 2.2.2.2 nexthop-group connected 1"') @@ -438,15 +480,21 @@ def test_nexthop_groups(): ## Recursive - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group basic-recursive" -c "nexthop 2.2.2.1"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group basic-recursive" -c "nexthop 2.2.2.1"' + ) - net["r1"].cmd('vtysh -c "sharp install routes 3.3.3.1 nexthop-group basic-recursive 1"') + net["r1"].cmd( + 'vtysh -c "sharp install routes 3.3.3.1 nexthop-group basic-recursive 1"' + ) verify_route_nexthop_group("3.3.3.1/32", 
True) ## Duplicate - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group duplicate" -c "nexthop 2.2.2.1" -c "nexthop 1.1.1.1"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group duplicate" -c "nexthop 2.2.2.1" -c "nexthop 1.1.1.1"' + ) net["r1"].cmd('vtysh -c "sharp install routes 3.3.3.2 nexthop-group duplicate 1"') @@ -454,15 +502,19 @@ def test_nexthop_groups(): ## Two 4-Way ECMP - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group fourA" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2" \ - -c "nexthop 1.1.1.3" -c "nexthop 1.1.1.4"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group fourA" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2" \ + -c "nexthop 1.1.1.3" -c "nexthop 1.1.1.4"' + ) net["r1"].cmd('vtysh -c "sharp install routes 4.4.4.1 nexthop-group fourA 1"') verify_route_nexthop_group("4.4.4.1/32") - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group fourB" -c "nexthop 1.1.1.5" -c "nexthop 1.1.1.6" \ - -c "nexthop 1.1.1.7" -c "nexthop 1.1.1.8"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group fourB" -c "nexthop 1.1.1.5" -c "nexthop 1.1.1.6" \ + -c "nexthop 1.1.1.7" -c "nexthop 1.1.1.8"' + ) net["r1"].cmd('vtysh -c "sharp install routes 4.4.4.2 nexthop-group fourB 1"') @@ -470,9 +522,13 @@ def test_nexthop_groups(): ## Recursive to 8-Way ECMP - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group eight-recursive" -c "nexthop 4.4.4.1" -c "nexthop 4.4.4.2"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group eight-recursive" -c "nexthop 4.4.4.1" -c "nexthop 4.4.4.2"' + ) - net["r1"].cmd('vtysh -c "sharp install routes 5.5.5.1 nexthop-group eight-recursive 1"') + net["r1"].cmd( + 'vtysh -c "sharp install routes 5.5.5.1 nexthop-group eight-recursive 1"' + ) verify_route_nexthop_group("5.5.5.1/32") @@ -488,12 +544,13 @@ def test_nexthop_groups(): net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.2 1"') net["r1"].cmd('vtysh -c "sharp remove routes 5.5.5.1 1"') + def test_rip_status(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -502,30 +559,37 @@ def test_rip_status(): print("******************************************\n") failures = 0 for i in range(1, 2): - refTableFile = '%s/r%s/rip_status.ref' % (thisDir, i) + refTableFile = "%s/r%s/rip_status.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip rip status" 2> /dev/null').rstrip() - # Drop time in next due + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip rip status" 2> /dev/null') + .rstrip() + ) + # Drop time in next due actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IP RIP status", - title2="expected IP RIP status") + title2="expected IP RIP status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - 
sys.stderr.write('r%s failed IP RIP status check:\n%s\n' % (i, diff)) + sys.stderr.write("r%s failed IP RIP status check:\n%s\n" % (i, diff)) failures += 1 else: print("r%s ok" % i) @@ -534,7 +598,7 @@ def test_rip_status(): # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -546,7 +610,7 @@ def test_ripng_status(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -555,41 +619,53 @@ def test_ripng_status(): print("******************************************\n") failures = 0 for i in range(1, 2): - refTableFile = '%s/r%s/ripng_status.ref' % (thisDir, i) + refTableFile = "%s/r%s/ripng_status.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null') + .rstrip() + ) # Mask out Link-Local mac address portion. They are random... actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual) - # Drop time in next due + # Drop time in next due actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual) # Drop time in last update actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual IPv6 RIPng status", - title2="expected IPv6 RIPng status") + title2="expected IPv6 RIPng status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed IPv6 RIPng status check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed IPv6 RIPng status check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -601,7 +677,7 @@ def test_ospfv2_interfaces(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -610,50 +686,71 @@ def test_ospfv2_interfaces(): print("******************************************\n") failures = 0 for i in range(1, 2): - refTableFile = '%s/r%s/show_ip_ospf_interface.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ip_ospf_interface.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result 
from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip ospf interface" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip ospf interface" 2> /dev/null') + .rstrip() + ) # Mask out Bandwidth portion. They may change.. actual = re.sub(r"BW [0-9]+ Mbit", "BW XX Mbit", actual) actual = re.sub(r"ifindex [0-9]", "ifindex X", actual) - # Drop time in next due + # Drop time in next due actual = re.sub(r"Hello due in [0-9\.]+s", "Hello due in XX.XXXs", actual) - actual = re.sub(r"Hello due in [0-9\.]+ usecs", "Hello due in XX.XXXs", actual) + actual = re.sub( + r"Hello due in [0-9\.]+ usecs", "Hello due in XX.XXXs", actual + ) # Fix 'MTU mismatch detection: enabled' vs 'MTU mismatch detection:enabled' - accept both - actual = re.sub(r"MTU mismatch detection:([a-z]+.*)", r"MTU mismatch detection: \1", actual) + actual = re.sub( + r"MTU mismatch detection:([a-z]+.*)", + r"MTU mismatch detection: \1", + actual, + ) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW IP OSPF INTERFACE", - title2="expected SHOW IP OSPF INTERFACE") + title2="expected SHOW IP OSPF INTERFACE", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW IP OSPF INTERFACE check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed SHOW IP OSPF INTERFACE check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) # Ignoring the issue if told to ignore (ie not yet fixed) - if (failures != 0): - if (os.environ.get('bamboo_TOPOTESTS_ISSUE_348') == "IGNORE"): - sys.stderr.write('Known issue - IGNORING. See https://github.com/FRRouting/frr/issues/348\n') - pytest.skip('Known issue - IGNORING. See https://github.com/FRRouting/frr/issues/348') + if failures != 0: + if os.environ.get("bamboo_TOPOTESTS_ISSUE_348") == "IGNORE": + sys.stderr.write( + "Known issue - IGNORING. See https://github.com/FRRouting/frr/issues/348\n" + ) + pytest.skip( + "Known issue - IGNORING. 
See https://github.com/FRRouting/frr/issues/348" + ) - assert failures == 0, "SHOW IP OSPF INTERFACE failed for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "SHOW IP OSPF INTERFACE failed for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -665,7 +762,7 @@ def test_isis_interfaces(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -674,42 +771,52 @@ def test_isis_interfaces(): print("******************************************\n") failures = 0 for i in range(1, 2): - refTableFile = '%s/r%s/show_isis_interface_detail.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_isis_interface_detail.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show isis interface detail" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show isis interface detail" 2> /dev/null') + .rstrip() + ) # Mask out Link-Local mac address portion. They are random... actual = re.sub(r"fe80::[0-9a-f:]+", "fe80::XXXX:XXXX:XXXX:XXXX", actual) # Mask out SNPA mac address portion. They are random... actual = re.sub(r"SNPA: [0-9a-f\.]+", "SNPA: XXXX.XXXX.XXXX", actual) # Mask out Circuit ID number - actual = re.sub(r"Circuit Id: 0x[0-9a-f]+", "Circuit Id: 0xXX", - actual) + actual = re.sub(r"Circuit Id: 0x[0-9a-f]+", "Circuit Id: 0xXX", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW ISIS INTERFACE DETAIL", - title2="expected SHOW ISIS OSPF6 INTERFACE DETAIL") + title2="expected SHOW ISIS OSPF6 INTERFACE DETAIL", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW ISIS INTERFACE DETAIL check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed SHOW ISIS INTERFACE DETAIL check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "SHOW ISIS INTERFACE DETAIL failed for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "SHOW ISIS INTERFACE DETAIL failed for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -721,7 +828,7 @@ def test_bgp_summary(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -730,15 +837,19 @@ def test_bgp_summary(): print("******************************************\n") failures = 0 for i in 
range(1, 2): - refTableFile = '%s/r%s/show_ip_bgp_summary.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_ip_bgp_summary.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show ip bgp summary" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show ip bgp summary" 2> /dev/null') + .rstrip() + ) # Mask out "using XXiXX bytes" portion. They are random... actual = re.sub(r"using [0-9]+ bytes", "using XXXX bytes", actual) # Mask out "using XiXXX KiB" portion. They are random... @@ -747,50 +858,60 @@ def test_bgp_summary(): # Remove extra summaries which exist with newer versions # # Remove summary lines (changed recently) - actual = re.sub(r'Total number.*', '', actual) - actual = re.sub(r'Displayed.*', '', actual) + actual = re.sub(r"Total number.*", "", actual) + actual = re.sub(r"Displayed.*", "", actual) # Remove IPv4 Unicast Summary (Title only) - actual = re.sub(r'IPv4 Unicast Summary:', '', actual) + actual = re.sub(r"IPv4 Unicast Summary:", "", actual) # Remove IPv4 Multicast Summary (all of it) - actual = re.sub(r'IPv4 Multicast Summary:', '', actual) - actual = re.sub(r'No IPv4 Multicast neighbor is configured', '', actual) + actual = re.sub(r"IPv4 Multicast Summary:", "", actual) + actual = re.sub(r"No IPv4 Multicast neighbor is configured", "", actual) # Remove IPv4 VPN Summary (all of it) - actual = re.sub(r'IPv4 VPN Summary:', '', actual) - actual = re.sub(r'No IPv4 VPN neighbor is configured', '', actual) + actual = re.sub(r"IPv4 VPN Summary:", "", actual) + actual = re.sub(r"No IPv4 VPN neighbor is configured", "", actual) # Remove IPv4 Encap Summary (all of it) - actual = re.sub(r'IPv4 Encap Summary:', '', actual) - actual = re.sub(r'No IPv4 Encap neighbor is configured', '', actual) + actual = re.sub(r"IPv4 Encap Summary:", "", actual) + actual = re.sub(r"No IPv4 Encap neighbor is configured", "", actual) # Remove Unknown Summary (all of it) - actual = re.sub(r'Unknown Summary:', '', actual) - actual = re.sub(r'No Unknown neighbor is configured', '', actual) + actual = re.sub(r"Unknown Summary:", "", actual) + actual = re.sub(r"No Unknown neighbor is configured", "", actual) - actual = re.sub(r'IPv4 labeled-unicast Summary:', '', actual) - actual = re.sub(r'No IPv4 labeled-unicast neighbor is configured', '', actual) + actual = re.sub(r"IPv4 labeled-unicast Summary:", "", actual) + actual = re.sub( + r"No IPv4 labeled-unicast neighbor is configured", "", actual + ) # Strip empty lines actual = actual.lstrip() actual = actual.rstrip() # # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW IP BGP SUMMARY", - title2="expected SHOW IP BGP SUMMARY") + title2="expected SHOW IP BGP SUMMARY", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW IP BGP SUMMARY check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed SHOW IP BGP SUMMARY check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures 
== 0, "SHOW IP BGP SUMMARY failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "SHOW IP BGP SUMMARY failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -802,7 +923,7 @@ def test_bgp_ipv6_summary(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -811,15 +932,19 @@ def test_bgp_ipv6_summary(): print("******************************************\n") failures = 0 for i in range(1, 2): - refTableFile = '%s/r%s/show_bgp_ipv6_summary.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_bgp_ipv6_summary.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show bgp ipv6 summary" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show bgp ipv6 summary" 2> /dev/null') + .rstrip() + ) # Mask out "using XXiXX bytes" portion. They are random... actual = re.sub(r"using [0-9]+ bytes", "using XXXX bytes", actual) # Mask out "using XiXXX KiB" portion. They are random... @@ -828,51 +953,61 @@ def test_bgp_ipv6_summary(): # Remove extra summaries which exist with newer versions # # Remove summary lines (changed recently) - actual = re.sub(r'Total number.*', '', actual) - actual = re.sub(r'Displayed.*', '', actual) + actual = re.sub(r"Total number.*", "", actual) + actual = re.sub(r"Displayed.*", "", actual) # Remove IPv4 Unicast Summary (Title only) - actual = re.sub(r'IPv6 Unicast Summary:', '', actual) + actual = re.sub(r"IPv6 Unicast Summary:", "", actual) # Remove IPv4 Multicast Summary (all of it) - actual = re.sub(r'IPv6 Multicast Summary:', '', actual) - actual = re.sub(r'No IPv6 Multicast neighbor is configured', '', actual) + actual = re.sub(r"IPv6 Multicast Summary:", "", actual) + actual = re.sub(r"No IPv6 Multicast neighbor is configured", "", actual) # Remove IPv4 VPN Summary (all of it) - actual = re.sub(r'IPv6 VPN Summary:', '', actual) - actual = re.sub(r'No IPv6 VPN neighbor is configured', '', actual) + actual = re.sub(r"IPv6 VPN Summary:", "", actual) + actual = re.sub(r"No IPv6 VPN neighbor is configured", "", actual) # Remove IPv4 Encap Summary (all of it) - actual = re.sub(r'IPv6 Encap Summary:', '', actual) - actual = re.sub(r'No IPv6 Encap neighbor is configured', '', actual) + actual = re.sub(r"IPv6 Encap Summary:", "", actual) + actual = re.sub(r"No IPv6 Encap neighbor is configured", "", actual) # Remove Unknown Summary (all of it) - actual = re.sub(r'Unknown Summary:', '', actual) - actual = re.sub(r'No Unknown neighbor is configured', '', actual) + actual = re.sub(r"Unknown Summary:", "", actual) + actual = re.sub(r"No Unknown neighbor is configured", "", actual) # Remove Labeled Unicast Summary (all of it) - actual = re.sub(r'IPv6 labeled-unicast Summary:', '', actual) - actual = re.sub(r'No IPv6 labeled-unicast neighbor is configured', '', actual) + actual = re.sub(r"IPv6 labeled-unicast Summary:", "", actual) + actual 
= re.sub( + r"No IPv6 labeled-unicast neighbor is configured", "", actual + ) # Strip empty lines actual = actual.lstrip() actual = actual.rstrip() # # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW BGP IPv6 SUMMARY", - title2="expected SHOW BGP IPv6 SUMMARY") + title2="expected SHOW BGP IPv6 SUMMARY", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed SHOW BGP IPv6 SUMMARY check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed SHOW BGP IPv6 SUMMARY check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - assert failures == 0, "SHOW BGP IPv6 SUMMARY failed for router r%s:\n%s" % (i, diff) + assert failures == 0, "SHOW BGP IPv6 SUMMARY failed for router r%s:\n%s" % ( + i, + diff, + ) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -884,7 +1019,7 @@ def test_bgp_ipv4(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -894,27 +1029,31 @@ def test_bgp_ipv4(): diffresult = {} for i in range(1, 2): success = 0 - for refTableFile in (glob.glob( - '%s/r%s/show_bgp_ipv4*.ref' % (thisDir, i))): + for refTableFile in glob.glob("%s/r%s/show_bgp_ipv4*.ref" % (thisDir, i)): if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show bgp ipv4" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i].cmd('vtysh -c "show bgp ipv4" 2> /dev/null').rstrip() + ) # Remove summary line (changed recently) - actual = re.sub(r'Total number.*', '', actual) - actual = re.sub(r'Displayed.*', '', actual) + actual = re.sub(r"Total number.*", "", actual) + actual = re.sub(r"Displayed.*", "", actual) actual = actual.rstrip() # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW BGP IPv4", - title2="expected SHOW BGP IPv4") + title2="expected SHOW BGP IPv4", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: @@ -925,17 +1064,20 @@ def test_bgp_ipv4(): break if not success: - resultstr = 'No template matched.\n' + resultstr = "No template matched.\n" for f in diffresult.iterkeys(): - resultstr += ( - 'template %s: r%s failed SHOW BGP IPv4 check:\n%s\n' - % (f, i, diffresult[f])) + resultstr += "template %s: r%s failed SHOW BGP IPv4 check:\n%s\n" % ( + f, + i, + diffresult[f], + ) raise AssertionError( - "SHOW BGP IPv4 failed for router r%s:\n%s" % (i, resultstr)) + "SHOW BGP IPv4 failed for router r%s:\n%s" % (i, resultstr) + ) # Make sure that all 
daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -947,7 +1089,7 @@ def test_bgp_ipv6(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -957,27 +1099,31 @@ def test_bgp_ipv6(): diffresult = {} for i in range(1, 2): success = 0 - for refTableFile in (glob.glob( - '%s/r%s/show_bgp_ipv6*.ref' % (thisDir, i))): + for refTableFile in glob.glob("%s/r%s/show_bgp_ipv6*.ref" % (thisDir, i)): if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show bgp ipv6" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i].cmd('vtysh -c "show bgp ipv6" 2> /dev/null').rstrip() + ) # Remove summary line (changed recently) - actual = re.sub(r'Total number.*', '', actual) - actual = re.sub(r'Displayed.*', '', actual) + actual = re.sub(r"Total number.*", "", actual) + actual = re.sub(r"Displayed.*", "", actual) actual = actual.rstrip() # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual SHOW BGP IPv6", - title2="expected SHOW BGP IPv6") + title2="expected SHOW BGP IPv6", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: @@ -987,27 +1133,31 @@ def test_bgp_ipv6(): print("template %s matched: r%s ok" % (refTableFile, i)) if not success: - resultstr = 'No template matched.\n' + resultstr = "No template matched.\n" for f in diffresult.iterkeys(): - resultstr += ( - 'template %s: r%s failed SHOW BGP IPv6 check:\n%s\n' - % (f, i, diffresult[f])) + resultstr += "template %s: r%s failed SHOW BGP IPv6 check:\n%s\n" % ( + f, + i, + diffresult[f], + ) raise AssertionError( - "SHOW BGP IPv6 failed for router r%s:\n%s" % (i, resultstr)) + "SHOW BGP IPv6 failed for router r%s:\n%s" % (i, resultstr) + ) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line # CLI(net) + def test_route_map(): global fatal_error global net - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -1016,32 +1166,42 @@ def test_route_map(): print("*******************************************************\n") failures = 0 for i in range(1, 2): - refroutemap = '%s/r%s/show_route_map.ref' % (thisDir, i) + refroutemap = "%s/r%s/show_route_map.ref" % (thisDir, i) if os.path.isfile(refroutemap): expected = open(refroutemap).read().rstrip() - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) - actual = net['r%s' %i].cmd('vtysh -c "show route-map" 2> 
/dev/null').rstrip() - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ( + net["r%s" % i].cmd('vtysh -c "show route-map" 2> /dev/null').rstrip() + ) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) - diff = topotest.get_textdiff(actual, expected, - title1="actual show route-map", - title2="expected show route-map") + diff = topotest.get_textdiff( + actual, + expected, + title1="actual show route-map", + title2="expected show route-map", + ) if diff: - sys.stderr.write('r%s failed show route-map command Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed show route-map command Check:\n%s\n" % (i, diff) + ) failures += 1 else: - print("r%s ok" %i) + print("r%s ok" % i) + + assert ( + failures == 0 + ), "Show route-map command failed for router r%s:\n%s" % (i, diff) - assert failures == 0, "Show route-map command failed for router r%s:\n%s" % (i, diff) def test_nexthop_groups_with_route_maps(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Verifying Nexthop Groups With Route-Maps") @@ -1050,14 +1210,18 @@ def test_nexthop_groups_with_route_maps(): ### Nexthop Group With Route-Map Tests # Create a lib nexthop-group - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group test" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group test" -c "nexthop 1.1.1.1" -c "nexthop 1.1.1.2"' + ) ## Route-Map Proto Source route_str = "2.2.2.1" src_str = "192.168.0.1" - net["r1"].cmd('vtysh -c "c t" -c "route-map NH-SRC permit 111" -c "set src %s"' % src_str) + net["r1"].cmd( + 'vtysh -c "c t" -c "route-map NH-SRC permit 111" -c "set src %s"' % src_str + ) net["r1"].cmd('vtysh -c "c t" -c "ip protocol sharp route-map NH-SRC"') net["r1"].cmd('vtysh -c "sharp install routes %s nexthop-group test 1"' % route_str) @@ -1066,14 +1230,19 @@ def test_nexthop_groups_with_route_maps(): # Only a valid test on linux using nexthop objects if sys.platform.startswith("linux"): - output = net["r1"].cmd('ip route show %s/32' % route_str) + output = net["r1"].cmd("ip route show %s/32" % route_str) match = re.search(r"src %s" % src_str, output) - assert match is not None, "Route %s/32 not installed with src %s" % (route_str, src_str) + assert match is not None, "Route %s/32 not installed with src %s" % ( + route_str, + src_str, + ) # Remove NHG routes and route-map net["r1"].cmd('vtysh -c "sharp remove routes %s 1"' % route_str) net["r1"].cmd('vtysh -c "c t" -c "no ip protocol sharp route-map NH-SRC"') - net["r1"].cmd('vtysh -c "c t" -c "no route-map NH-SRC permit 111" -c "set src %s"' % src_str) + net["r1"].cmd( + 'vtysh -c "c t" -c "no route-map NH-SRC permit 111" -c "set src %s"' % src_str + ) net["r1"].cmd('vtysh -c "c t" -c "no route-map NH-SRC"') ## Route-Map Deny/Permit with same nexthop group @@ -1081,18 +1250,26 @@ def test_nexthop_groups_with_route_maps(): permit_route_str = "3.3.3.1" deny_route_str = "3.3.3.2" - net["r1"].cmd('vtysh -c "c t" -c "ip prefix-list NOPE seq 5 permit %s/32"' % permit_route_str) - net["r1"].cmd('vtysh -c "c t" -c "route-map NOPE permit 111" -c "match ip address prefix-list NOPE"') + net["r1"].cmd( + 'vtysh -c "c t" -c "ip prefix-list NOPE seq 5 permit %s/32"' % permit_route_str + ) + net["r1"].cmd( + 'vtysh -c "c t" -c "route-map NOPE permit 111" -c "match ip address prefix-list NOPE"' + ) net["r1"].cmd('vtysh -c "c t" -c "route-map NOPE deny 222"') net["r1"].cmd('vtysh -c "c t" -c 
"ip protocol sharp route-map NOPE"') # This route should be permitted - net["r1"].cmd('vtysh -c "sharp install routes %s nexthop-group test 1"' % permit_route_str) + net["r1"].cmd( + 'vtysh -c "sharp install routes %s nexthop-group test 1"' % permit_route_str + ) verify_route_nexthop_group("%s/32" % permit_route_str) # This route should be denied - net["r1"].cmd('vtysh -c "sharp install routes %s nexthop-group test 1"' % deny_route_str) + net["r1"].cmd( + 'vtysh -c "sharp install routes %s nexthop-group test 1"' % deny_route_str + ) nhg_id = route_get_nhg_id(deny_route_str) output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id) @@ -1110,14 +1287,18 @@ def test_nexthop_groups_with_route_maps(): net["r1"].cmd('vtysh -c "c t" -c "no route-map NOPE permit 111"') net["r1"].cmd('vtysh -c "c t" -c "no route-map NOPE deny 222"') net["r1"].cmd('vtysh -c "c t" -c "no route-map NOPE"') - net["r1"].cmd('vtysh -c "c t" -c "no ip prefix-list NOPE seq 5 permit %s/32"' % permit_route_str) + net["r1"].cmd( + 'vtysh -c "c t" -c "no ip prefix-list NOPE seq 5 permit %s/32"' + % permit_route_str + ) + def test_nexthop_group_replace(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Verifying Nexthop Groups") @@ -1127,7 +1308,9 @@ def test_nexthop_group_replace(): ## 2-Way ECMP Directly Connected - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group replace" -c "nexthop 1.1.1.1 r1-eth1 onlink" -c "nexthop 1.1.1.2 r1-eth2 onlink"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group replace" -c "nexthop 1.1.1.1 r1-eth1 onlink" -c "nexthop 1.1.1.2 r1-eth2 onlink"' + ) # Create with sharpd using nexthop-group net["r1"].cmd('vtysh -c "sharp install routes 3.3.3.1 nexthop-group replace 1"') @@ -1135,21 +1318,24 @@ def test_nexthop_group_replace(): verify_route_nexthop_group("3.3.3.1/32") # Change the nexthop group - net["r1"].cmd('vtysh -c "c t" -c "nexthop-group replace" -c "no nexthop 1.1.1.1 r1-eth1 onlink" -c "nexthop 1.1.1.3 r1-eth1 onlink" -c "nexthop 1.1.1.4 r1-eth4 onlink"') + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group replace" -c "no nexthop 1.1.1.1 r1-eth1 onlink" -c "nexthop 1.1.1.3 r1-eth1 onlink" -c "nexthop 1.1.1.4 r1-eth4 onlink"' + ) # Verify it updated. We can just check install and ecmp count here. 
verify_route_nexthop_group("3.3.3.1/32", False, 3) + def test_mpls_interfaces(): global fatal_error global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) # Skip if no LDP installed or old kernel - if (net['r1'].daemon_available('ldpd') == False): + if net["r1"].daemon_available("ldpd") == False: pytest.skip("No MPLS or kernel < 4.5") thisDir = os.path.dirname(os.path.realpath(__file__)) @@ -1158,40 +1344,51 @@ def test_mpls_interfaces(): print("******************************************\n") failures = 0 for i in range(1, 2): - refTableFile = '%s/r%s/show_mpls_ldp_interface.ref' % (thisDir, i) + refTableFile = "%s/r%s/show_mpls_ldp_interface.ref" % (thisDir, i) if os.path.isfile(refTableFile): # Read expected result from file expected = open(refTableFile).read().rstrip() # Fix newlines (make them all the same) - expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1) + expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) # Actual output from router - actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp interface" 2> /dev/null').rstrip() + actual = ( + net["r%s" % i] + .cmd('vtysh -c "show mpls ldp interface" 2> /dev/null') + .rstrip() + ) # Mask out Timer in Uptime actual = re.sub(r" [0-9][0-9]:[0-9][0-9]:[0-9][0-9] ", " xx:xx:xx ", actual) # Fix newlines (make them all the same) - actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1) + actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) # Generate Diff - diff = topotest.get_textdiff(actual, expected, + diff = topotest.get_textdiff( + actual, + expected, title1="actual MPLS LDP interface status", - title2="expected MPLS LDP interface status") + title2="expected MPLS LDP interface status", + ) # Empty string if it matches, otherwise diff contains unified diff if diff: - sys.stderr.write('r%s failed MPLS LDP Interface status Check:\n%s\n' % (i, diff)) + sys.stderr.write( + "r%s failed MPLS LDP Interface status Check:\n%s\n" % (i, diff) + ) failures += 1 else: print("r%s ok" % i) - if failures>0: + if failures > 0: fatal_error = "MPLS LDP Interface status failed" - assert failures == 0, "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff) + assert ( + failures == 0 + ), "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff) # Make sure that all daemons are running for i in range(1, 2): - fatal_error = net['r%s' % i].checkRouterRunning() + fatal_error = net["r%s" % i].checkRouterRunning() assert fatal_error == "", fatal_error # For debugging after starting FRR daemons, uncomment the next line @@ -1203,58 +1400,60 @@ def test_shutdown_check_stderr(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) print("\n\n** Verifying unexpected STDERR output from daemons") print("******************************************\n") - if os.environ.get('TOPOTESTS_CHECK_STDERR') is None: - print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n") - pytest.skip('Skipping test for Stderr output') + if os.environ.get("TOPOTESTS_CHECK_STDERR") is None: + print( + "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n" + ) + pytest.skip("Skipping test for Stderr output") thisDir = os.path.dirname(os.path.realpath(__file__)) print("thisDir=" + thisDir) - net['r1'].stopRouter() + net["r1"].stopRouter() - log = net['r1'].getStdErr('ripd') + log = 
net["r1"].getStdErr("ripd") if log: print("\nRIPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('ripngd') + log = net["r1"].getStdErr("ripngd") if log: print("\nRIPngd StdErr Log:\n" + log) - log = net['r1'].getStdErr('ospfd') + log = net["r1"].getStdErr("ospfd") if log: print("\nOSPFd StdErr Log:\n" + log) - log = net['r1'].getStdErr('ospf6d') + log = net["r1"].getStdErr("ospf6d") if log: print("\nOSPF6d StdErr Log:\n" + log) - log = net['r1'].getStdErr('isisd') + log = net["r1"].getStdErr("isisd") if log: print("\nISISd StdErr Log:\n" + log) - log = net['r1'].getStdErr('bgpd') + log = net["r1"].getStdErr("bgpd") if log: print("\nBGPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('nhrpd') + log = net["r1"].getStdErr("nhrpd") if log: print("\nNHRPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('pbrd') + log = net["r1"].getStdErr("pbrd") if log: print("\nPBRd StdErr Log:\n" + log) - log = net['r1'].getStdErr('babeld') + log = net["r1"].getStdErr("babeld") if log: print("\nBABELd StdErr Log:\n" + log) - if (net['r1'].daemon_available('ldpd')): - log = net['r1'].getStdErr('ldpd') + if net["r1"].daemon_available("ldpd"): + log = net["r1"].getStdErr("ldpd") if log: print("\nLDPd StdErr Log:\n" + log) - log = net['r1'].getStdErr('zebra') + log = net["r1"].getStdErr("zebra") if log: print("\nZebra StdErr Log:\n" + log) @@ -1264,23 +1463,27 @@ def test_shutdown_check_memleak(): global net # Skip if previous fatal error condition is raised - if (fatal_error != ""): + if fatal_error != "": pytest.skip(fatal_error) - if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None: - print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n") - pytest.skip('Skipping test for memory leaks') - + if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None: + print( + "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n" + ) + pytest.skip("Skipping test for memory leaks") + thisDir = os.path.dirname(os.path.realpath(__file__)) for i in range(1, 2): - net['r%s' % i].stopRouter() - net['r%s' % i].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__)) + net["r%s" % i].stopRouter() + net["r%s" % i].report_memory_leaks( + os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__) + ) -if __name__ == '__main__': +if __name__ == "__main__": - setLogLevel('info') + setLogLevel("info") # To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli # retval = pytest.main(["-s", "--tb=no"]) retval = pytest.main(["-s"]) diff --git a/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py b/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py index 514933b891..bd3b876eeb 100644 --- a/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py +++ b/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py @@ -118,6 +118,7 @@ def teardown_module(_mod): tgen = get_topogen() tgen.stop_topology() + def test_wait_protocols_convergence(): "Wait for all protocols to converge" tgen = get_topogen() @@ -128,41 +129,40 @@ def test_wait_protocols_convergence(): def expect_loopback_route(router, iptype, route, proto): "Wait until route is present on RIB for protocol." 
- logger.info('waiting route {} in {}'.format(route, router)) + logger.info("waiting route {} in {}".format(route, router)) test_func = partial( topotest.router_json_cmp, tgen.gears[router], - 'show {} route json'.format(iptype), - { route: [{ 'protocol': proto }] } + "show {} route json".format(iptype), + {route: [{"protocol": proto}]}, ) _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) assertmsg = '"{}" OSPF convergence failure'.format(router) assert result is None, assertmsg - # Wait for R1 <-> R6 convergence. - expect_loopback_route('r1', 'ip', '10.254.254.6/32', 'ospf') + expect_loopback_route("r1", "ip", "10.254.254.6/32", "ospf") # Wait for R6 <-> R1 convergence. - expect_loopback_route('r6', 'ip', '10.254.254.1/32', 'ospf') + expect_loopback_route("r6", "ip", "10.254.254.1/32", "ospf") # Wait for R2 <-> R3 convergence. - expect_loopback_route('r2', 'ip', '10.254.254.3/32', 'bgp') + expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp") # Wait for R3 <-> R2 convergence. - expect_loopback_route('r3', 'ip', '10.254.254.2/32', 'bgp') + expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp") # Wait for R3 <-> R4 convergence. - expect_loopback_route('r3', 'ipv6', '2001:db8:3::/64', 'isis') + expect_loopback_route("r3", "ipv6", "2001:db8:3::/64", "isis") # Wait for R4 <-> R3 convergence. - expect_loopback_route('r4', 'ipv6', '2001:db8:1::/64', 'isis') + expect_loopback_route("r4", "ipv6", "2001:db8:1::/64", "isis") # Wait for R4 <-> R5 convergence. - expect_loopback_route('r4', 'ipv6', '2001:db8:3::/64', 'ospf6') + expect_loopback_route("r4", "ipv6", "2001:db8:3::/64", "ospf6") # Wait for R5 <-> R4 convergence. - expect_loopback_route('r5', 'ipv6', '2001:db8:2::/64', 'ospf6') + expect_loopback_route("r5", "ipv6", "2001:db8:2::/64", "ospf6") def test_bfd_profile_values(): diff --git a/tests/topotests/bfd-topo3/test_bfd_topo3.py b/tests/topotests/bfd-topo3/test_bfd_topo3.py index fa68ace59d..f473b67108 100644 --- a/tests/topotests/bfd-topo3/test_bfd_topo3.py +++ b/tests/topotests/bfd-topo3/test_bfd_topo3.py @@ -103,44 +103,44 @@ def test_wait_bgp_convergence(): def expect_loopback_route(router, iptype, route, proto): "Wait until route is present on RIB for protocol." - logger.info('waiting route {} in {}'.format(route, router)) + logger.info("waiting route {} in {}".format(route, router)) test_func = partial( topotest.router_json_cmp, tgen.gears[router], - 'show {} route json'.format(iptype), - { route: [{ 'protocol': proto }] } + "show {} route json".format(iptype), + {route: [{"protocol": proto}]}, ) _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) assertmsg = '"{}" OSPF convergence failure'.format(router) assert result is None, assertmsg # Wait for R1 <-> R2 convergence. - expect_loopback_route('r1', 'ip', '10.254.254.2/32', 'bgp') + expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp") # Wait for R1 <-> R3 convergence. - expect_loopback_route('r1', 'ip', '10.254.254.3/32', 'bgp') + expect_loopback_route("r1", "ip", "10.254.254.3/32", "bgp") # Wait for R1 <-> R4 convergence. - expect_loopback_route('r1', 'ip', '10.254.254.4/32', 'bgp') + expect_loopback_route("r1", "ip", "10.254.254.4/32", "bgp") # Wait for R2 <-> R1 convergence. - expect_loopback_route('r2', 'ip', '10.254.254.1/32', 'bgp') + expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp") # Wait for R2 <-> R3 convergence. 
- expect_loopback_route('r2', 'ip', '10.254.254.3/32', 'bgp') + expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp") # Wait for R2 <-> R4 convergence. - expect_loopback_route('r2', 'ip', '10.254.254.4/32', 'bgp') + expect_loopback_route("r2", "ip", "10.254.254.4/32", "bgp") # Wait for R3 <-> R1 convergence. - expect_loopback_route('r3', 'ip', '10.254.254.1/32', 'bgp') + expect_loopback_route("r3", "ip", "10.254.254.1/32", "bgp") # Wait for R3 <-> R2 convergence. - expect_loopback_route('r3', 'ip', '10.254.254.2/32', 'bgp') + expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp") # Wait for R3 <-> R4 convergence. - expect_loopback_route('r3', 'ip', '10.254.254.4/32', 'bgp') + expect_loopback_route("r3", "ip", "10.254.254.4/32", "bgp") # Wait for R4 <-> R1 convergence. - expect_loopback_route('r4', 'ip', '10.254.254.1/32', 'bgp') + expect_loopback_route("r4", "ip", "10.254.254.1/32", "bgp") # Wait for R4 <-> R2 convergence. - expect_loopback_route('r4', 'ip', '10.254.254.2/32', 'bgp') + expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp") # Wait for R4 <-> R3 convergence. - expect_loopback_route('r4', 'ip', '10.254.254.3/32', 'bgp') + expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp") def test_wait_bfd_convergence(): @@ -153,22 +153,22 @@ def test_wait_bfd_convergence(): def expect_bfd_configuration(router): "Load JSON file and compare with 'show bfd peer json'" - logger.info('waiting BFD configuration on router {}'.format(router)) - bfd_config = json.loads(open('{}/{}/bfd-peers.json'.format(CWD, router)).read()) + logger.info("waiting BFD configuration on router {}".format(router)) + bfd_config = json.loads(open("{}/{}/bfd-peers.json".format(CWD, router)).read()) test_func = partial( topotest.router_json_cmp, tgen.gears[router], - 'show bfd peers json', - bfd_config + "show bfd peers json", + bfd_config, ) _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) assertmsg = '"{}" BFD configuration failure'.format(router) assert result is None, assertmsg - expect_bfd_configuration('r1') - expect_bfd_configuration('r2') - expect_bfd_configuration('r3') - expect_bfd_configuration('r4') + expect_bfd_configuration("r1") + expect_bfd_configuration("r2") + expect_bfd_configuration("r3") + expect_bfd_configuration("r4") def teardown_module(_mod): diff --git a/tests/topotests/bgp-auth/test_bgp_auth.py b/tests/topotests/bgp-auth/test_bgp_auth.py index 286af3bf65..559cf4fb1b 100644 --- a/tests/topotests/bgp-auth/test_bgp_auth.py +++ b/tests/topotests/bgp-auth/test_bgp_auth.py @@ -270,7 +270,7 @@ def peer_name(rtr, prefix, vrf): def print_diag(vrf): "print failure disagnostics" - + tgen = get_topogen() router_list = tgen.routers() for rname, router in router_list.items(): @@ -330,7 +330,7 @@ def clear_ospf(vrf=""): def check_neigh_state(router, peer, state, vrf=""): "check BGP neighbor state on a router" - + count = 0 matched = False neigh_output = "" diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py index 41fa7c0a09..b3b7256ac4 100644 --- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py @@ -76,7 +76,7 @@ from lib.common_config import ( create_prefix_lists, create_route_maps, verify_bgp_community, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import ( @@ -139,7 
+139,7 @@ def setup_module(mod): """ # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") @@ -567,7 +567,7 @@ def test_BGP_attributes_with_vrf_default_keyword_p0(request): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - #reset_config_on_routers(tgen) + # reset_config_on_routers(tgen) step("Configure static routes and redistribute in BGP on R3") for addr_type in ADDR_TYPES: diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py index eed118ebdc..12069a12dc 100644 --- a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py @@ -61,7 +61,7 @@ from lib.common_config import ( check_address_types, interface_status, reset_config_on_routers, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp @@ -110,7 +110,7 @@ def setup_module(mod): global ADDR_TYPES # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") @@ -144,9 +144,7 @@ def setup_module(mod): ) link_data = [ - val - for links, val in topo["routers"]["r2"]["links"].items() - if "r3" in links + val for links, val in topo["routers"]["r2"]["links"].items() if "r3" in links ] for adt in ADDR_TYPES: NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data] @@ -161,9 +159,7 @@ def setup_module(mod): INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1])) link_data = [ - val - for links, val in topo["routers"]["r3"]["links"].items() - if "r2" in links + val for links, val in topo["routers"]["r3"]["links"].items() if "r2" in links ] INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1])) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py index 7357c33824..50aa281d34 100644 --- a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py @@ -61,7 +61,7 @@ from lib.common_config import ( check_address_types, interface_status, reset_config_on_routers, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp @@ -110,7 +110,7 @@ def setup_module(mod): global ADDR_TYPES # Required linux kernel version for this suite to run. 
- result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") @@ -145,9 +145,7 @@ def setup_module(mod): ) link_data = [ - val - for links, val in topo["routers"]["r2"]["links"].items() - if "r3" in links + val for links, val in topo["routers"]["r2"]["links"].items() if "r3" in links ] for adt in ADDR_TYPES: NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data] @@ -162,9 +160,7 @@ def setup_module(mod): INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1])) link_data = [ - val - for links, val in topo["routers"]["r3"]["links"].items() - if "r2" in links + val for links, val in topo["routers"]["r3"]["links"].items() if "r2" in links ] INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data] INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1])) diff --git a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py index 9af22c06bd..8a8a49a23e 100644 --- a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py +++ b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py @@ -57,13 +57,13 @@ from mininet.topo import Topo class NetworkTopo(Topo): - ''' + """ EVPN Multihoming Topology - 1. Two level CLOS 2. Two spine switches - spine1, spine2 3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2 4. Two dual attached hosts per-rack - hostdx1, hostdx2 - ''' + """ def build(self, **_opts): "Build function" @@ -84,7 +84,6 @@ class NetworkTopo(Topo): # On main router # First switch is for a dummy interface (for local network) - ##################### spine1 ######################## # spine1-eth0 is connected to torm11-eth0 switch = tgen.add_switch("sw1") @@ -178,38 +177,44 @@ class NetworkTopo(Topo): ## ##################################################### -tor_ips = {"torm11" : "192.168.100.15", \ - "torm12" : "192.168.100.16", \ - "torm21" : "192.168.100.17", \ - "torm22" : "192.168.100.18"} +tor_ips = { + "torm11": "192.168.100.15", + "torm12": "192.168.100.16", + "torm21": "192.168.100.17", + "torm22": "192.168.100.18", +} -svi_ips = {"torm11" : "45.0.0.2", \ - "torm12" : "45.0.0.3", \ - "torm21" : "45.0.0.4", \ - "torm22" : "45.0.0.5"} +svi_ips = { + "torm11": "45.0.0.2", + "torm12": "45.0.0.3", + "torm21": "45.0.0.4", + "torm22": "45.0.0.5", +} -tor_ips_rack_1 = {"torm11" : "192.168.100.15", \ - "torm12" : "192.168.100.16"} +tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"} -tor_ips_rack_2 = {"torm21" : "192.168.100.17", \ - "torm22" : "192.168.100.18"} +tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"} + +host_es_map = { + "hostd11": "03:44:38:39:ff:ff:01:00:00:01", + "hostd12": "03:44:38:39:ff:ff:01:00:00:02", + "hostd21": "03:44:38:39:ff:ff:02:00:00:01", + "hostd22": "03:44:38:39:ff:ff:02:00:00:02", +} -host_es_map = {"hostd11" : "03:44:38:39:ff:ff:01:00:00:01", - "hostd12" : "03:44:38:39:ff:ff:01:00:00:02", - "hostd21" : "03:44:38:39:ff:ff:02:00:00:01", - "hostd22" : "03:44:38:39:ff:ff:02:00:00:02"} def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br): - ''' + """ Used to setup bonds on the TORs and hosts for MH - ''' + """ node.run("ip link add dev %s type bond mode 802.3ad" % bond_name) node.run("ip link set dev %s type bond lacp_rate 1" % bond_name) node.run("ip link set dev %s type bond miimon 100" % bond_name) node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name) node.run("ip link set dev %s type bond min_links 1" % 
bond_name) - node.run("ip link set dev %s type bond ad_actor_system %s" %\ - (bond_name, bond_ad_sys_mac)) + node.run( + "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac) + ) for bond_member in bond_members: node.run("ip link set dev %s down" % bond_member) @@ -225,15 +230,14 @@ def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br): node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name) node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name) node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name) - node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s"\ - % bond_name) + node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name) def config_mcast_tunnel_termination_device(node): - ''' + """ The kernel requires a device to terminate VxLAN multicast tunnels when EVPN-PIM is used for flooded traffic - ''' + """ node.run("ip link add dev ipmr-lo type dummy") node.run("ip link set dev ipmr-lo mtu 16000") node.run("ip link set dev ipmr-lo mode dormant") @@ -241,9 +245,9 @@ def config_mcast_tunnel_termination_device(node): def config_bridge(node): - ''' + """ Create a VLAN aware bridge - ''' + """ node.run("ip link add dev bridge type bridge stp_state 0") node.run("ip link set dev bridge type bridge vlan_filtering 1") node.run("ip link set dev bridge mtu 9216") @@ -255,10 +259,10 @@ def config_bridge(node): def config_vxlan(node, node_ip): - ''' + """ Create a VxLAN device for VNI 1000 and add it to the bridge. VLAN-1000 is mapped to VNI-1000. - ''' + """ node.run("ip link add dev vx-1000 type vxlan id 1000 dstport 4789") node.run("ip link set dev vx-1000 type vxlan nolearning") node.run("ip link set dev vx-1000 type vxlan local %s" % node_ip) @@ -279,9 +283,9 @@ def config_vxlan(node, node_ip): def config_svi(node, svi_pip): - ''' + """ Create an SVI for VLAN 1000 - ''' + """ node.run("ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q") node.run("ip addr add %s/24 dev vlan1000" % svi_pip) node.run("ip link set dev vlan1000 up") @@ -297,9 +301,9 @@ def config_svi(node, svi_pip): def config_tor(tor_name, tor, tor_ip, svi_pip): - ''' + """ Create the bond/vxlan-bridge on the TOR which acts as VTEP and EPN-PE - ''' + """ # create a device for terminating VxLAN multicast tunnels config_mcast_tunnel_termination_device(tor) @@ -329,17 +333,19 @@ def config_tors(tgen, tors): tor = tgen.gears[tor_name] config_tor(tor_name, tor, tor_ips.get(tor_name), svi_ips.get(tor_name)) + def compute_host_ip_mac(host_name): host_id = host_name.split("hostd")[1] - host_ip = "45.0.0."+ host_id + "/24" + host_ip = "45.0.0." 
+ host_id + "/24"
     host_mac = "00:00:00:00:00:" + host_id
 
     return host_ip, host_mac
 
+
 def config_host(host_name, host):
-    '''
+    """
     Create the dual-attached bond on host nodes for MH
-    '''
+    """
     bond_members = []
     bond_members.append(host_name + "-eth0")
     bond_members.append(host_name + "-eth1")
@@ -407,9 +413,9 @@ def teardown_module(_mod):
 
 
 def check_local_es(esi, vtep_ips, dut_name, down_vteps):
-    '''
+    """
     Check if ES peers are set up correctly on local ESs
-    '''
+    """
     peer_ips = []
     if "torm1" in dut_name:
         tor_ips_rack = tor_ips_rack_1
@@ -432,9 +438,9 @@ def check_local_es(esi, vtep_ips, dut_name, down_vteps):
 
 
 def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
-    '''
+    """
     Verify list of PEs associated with a remote ES
-    '''
+    """
     remote_ips = []
 
     if "torm1" in dut_name:
@@ -455,10 +461,11 @@ def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
 
     return (esi, diff) if diff else None
 
+
 def check_es(dut):
-    '''
+    """
     Verify list of PEs associated with all ESs, local and remote
-    '''
+    """
     bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es json")
     bgp_es_json = json.loads(bgp_es)
 
@@ -490,10 +497,11 @@ def check_es(dut):
 
     return result if result else None
 
+
 def check_one_es(dut, esi, down_vteps):
-    '''
+    """
     Verify list of PEs associated with all ESs, local and remote
-    '''
+    """
     bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi)
     es = json.loads(bgp_es)
 
@@ -513,12 +521,13 @@ def check_one_es(dut, esi, down_vteps):
 
     return result
 
+
 def test_evpn_es():
-    '''
+    """
     Two ESs are set up on each rack. This test checks if -
     1. ES peer has been added to the local ES (via Type-1/EAD route)
     2. The remote ESs are set up with the right list of PEs (via Type-1)
-    '''
+    """
 
     tgen = get_topogen()
 
@@ -534,11 +543,12 @@ def test_evpn_es():
         assert result is None, assertmsg
 
     # tgen.mininet_cli()
 
+
 def test_evpn_ead_update():
-    '''
+    """
     Flap a host link on the remote rack and check if the EAD updates
     are sent/processed for the corresponding ESI
-    '''
+    """
     tgen = get_topogen()
 
     if tgen.routers_have_failure():
@@ -580,30 +590,32 @@ def test_evpn_ead_update():
 
     # tgen.mininet_cli()
 
+
 def check_mac(dut, vni, mac, m_type, esi, intf):
-    '''
+    """
     checks if mac is present and if destination matches the one provided
-    '''
+    """
     out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))
     mac_js = json.loads(out)
 
     for mac, info in mac_js.items():
         tmp_esi = info.get("esi", "")
-        tmp_m_type = info.get("type", "") 
+        tmp_m_type = info.get("type", "")
         tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
         if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
             return None
 
     return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
 
+
 def test_evpn_mac():
-    '''
+    """
     1. Add a MAC on hostd11 and check if the MAC is synced between
     torm11 and torm12. And installed as a local MAC.
     2. 
Add a MAC on hostd21 and check if the MAC is installed as a remote MAC on torm11 and torm12 - ''' + """ tgen = get_topogen() @@ -646,6 +658,7 @@ def test_evpn_mac(): assertmsg = '"{}" remote MAC content incorrect'.format(tor.name) assert result is None, assertmsg + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py index 5aa1bdf329..36f1d8cd56 100644 --- a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py +++ b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py @@ -90,84 +90,36 @@ def test_vrf_route_leak(): # Test DONNA VRF. expect = { - '10.0.0.0/24': [ - { - 'protocol': 'connected', - } + "10.0.0.0/24": [{"protocol": "connected",}], + "10.0.1.0/24": [ + {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], - '10.0.1.0/24': [ - { - 'protocol': 'bgp', - 'selected': True, - 'nexthops': [ - { - 'fib': True - } - ] - } + "10.0.2.0/24": [{"protocol": "connected"}], + "10.0.3.0/24": [ + {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], - '10.0.2.0/24': [ - { - 'protocol': 'connected' - } - ], - '10.0.3.0/24': [ - { - 'protocol': 'bgp', - 'selected': True, - 'nexthops': [ - { - 'fib': True - } - ] - } - ] } test_func = partial( - topotest.router_json_cmp, r1, 'show ip route vrf DONNA json', expect + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect ) result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assert result, "BGP VRF DONNA check failed:\n{}".format(diff) # Test EVA VRF. expect = { - '10.0.0.0/24': [ - { - 'protocol': 'bgp', - 'selected': True, - 'nexthops': [ - { - 'fib': True - } - ] - } + "10.0.0.0/24": [ + {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], - '10.0.1.0/24': [ - { - 'protocol': 'connected', - } + "10.0.1.0/24": [{"protocol": "connected",}], + "10.0.2.0/24": [ + {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], - '10.0.2.0/24': [ - { - 'protocol': 'bgp', - 'selected': True, - 'nexthops': [ - { - 'fib': True - } - ] - } - ], - '10.0.3.0/24': [ - { - 'protocol': 'connected', - } - ] + "10.0.3.0/24": [{"protocol": "connected",}], } test_func = partial( - topotest.router_json_cmp, r1, 'show ip route vrf EVA json', expect + topotest.router_json_cmp, r1, "show ip route vrf EVA json", expect ) result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assert result, "BGP VRF EVA check failed:\n{}".format(diff) diff --git a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py index 25760501b4..d3656b8701 100644 --- a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py +++ b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py @@ -47,16 +47,17 @@ class BgpAggregateAddressTopo1(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) - r1 = tgen.add_router('r1') - r2 = tgen.add_router('r2') - peer1 = tgen.add_exabgp_peer('peer1', ip='10.0.0.2', - defaultRoute='via 10.0.0.1') + r1 = tgen.add_router("r1") + r2 = tgen.add_router("r2") + peer1 = tgen.add_exabgp_peer( + "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1" + ) - switch = tgen.add_switch('s1') + switch = tgen.add_switch("s1") switch.add_link(r1) switch.add_link(peer1) - switch = 
tgen.add_switch('s2') + switch = tgen.add_switch("s2") switch.add_link(r1) switch.add_link(r2) @@ -65,17 +66,17 @@ def setup_module(mod): tgen = Topogen(BgpAggregateAddressTopo1, mod.__name__) tgen.start_topology() - router = tgen.gears['r1'] + router = tgen.gears["r1"] router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r1/zebra.conf")) router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r1/bgpd.conf")) router.start() - router = tgen.gears['r2'] + router = tgen.gears["r2"] router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r2/zebra.conf")) router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r2/bgpd.conf")) router.start() - peer = tgen.gears['peer1'] + peer = tgen.gears["peer1"] peer.start(os.path.join(CWD, "peer1"), os.path.join(CWD, "exabgp.env")) @@ -92,21 +93,22 @@ def test_expect_convergence(): pytest.skip(tgen.errors) logger.info("waiting for protocols to converge") + def expect_loopback_route(router, iptype, route, proto): "Wait until route is present on RIB for protocol." - logger.info('waiting route {} in {}'.format(route, router)) + logger.info("waiting route {} in {}".format(route, router)) test_func = functools.partial( topotest.router_json_cmp, tgen.gears[router], - 'show {} route json'.format(iptype), - { route: [{ 'protocol': proto }] } + "show {} route json".format(iptype), + {route: [{"protocol": proto}]}, ) _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) assertmsg = '"{}" BGP convergence failure'.format(router) assert result is None, assertmsg - expect_loopback_route('r2', 'ip', '10.254.254.1/32', 'bgp') - expect_loopback_route('r2', 'ip', '10.254.254.3/32', 'bgp') + expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp") def test_bgp_aggregate_address_matching_med_only(): @@ -122,19 +124,18 @@ def test_bgp_aggregate_address_matching_med_only(): "192.168.0.1/32": [{"protocol": "bgp", "metric": 10}], "192.168.0.2/32": [{"protocol": "bgp", "metric": 10}], "192.168.0.3/32": [{"protocol": "bgp", "metric": 10}], - # Non matching MED: aggregation must not exist. "192.168.1.0/24": None, "192.168.1.1/32": [{"protocol": "bgp", "metric": 10}], "192.168.1.2/32": [{"protocol": "bgp", "metric": 10}], - "192.168.1.3/32": [{"protocol": "bgp", "metric": 20}] + "192.168.1.3/32": [{"protocol": "bgp", "metric": 20}], } test_func = functools.partial( topotest.router_json_cmp, - tgen.gears['r2'], - 'show ip route json', - routes_expected + tgen.gears["r2"], + "show ip route json", + routes_expected, ) _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) assertmsg = '"r2" BGP convergence failure' @@ -148,7 +149,8 @@ def test_bgp_aggregate_address_match_and_supress(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - tgen.gears['r1'].vtysh_multicmd(""" + tgen.gears["r1"].vtysh_multicmd( + """ configure terminal router bgp 65000 address-family ipv4 unicast @@ -156,7 +158,8 @@ no aggregate-address 192.168.0.0/24 matching-MED-only no aggregate-address 192.168.1.0/24 matching-MED-only aggregate-address 192.168.0.0/24 matching-MED-only summary-only aggregate-address 192.168.1.0/24 matching-MED-only summary-only -""") +""" + ) routes_expected = { # All MED matches, aggregation must exist. @@ -164,19 +167,18 @@ aggregate-address 192.168.1.0/24 matching-MED-only summary-only "192.168.0.1/32": None, "192.168.0.2/32": None, "192.168.0.3/32": None, - # Non matching MED: aggregation must not exist. 
"192.168.1.0/24": None, "192.168.1.1/32": [{"protocol": "bgp", "metric": 10}], "192.168.1.2/32": [{"protocol": "bgp", "metric": 10}], - "192.168.1.3/32": [{"protocol": "bgp", "metric": 20}] + "192.168.1.3/32": [{"protocol": "bgp", "metric": 20}], } test_func = functools.partial( topotest.router_json_cmp, - tgen.gears['r2'], - 'show ip route json', - routes_expected + tgen.gears["r2"], + "show ip route json", + routes_expected, ) _, result = topotest.run_and_expect(test_func, None, count=120, wait=1) assertmsg = '"r2" BGP convergence failure' diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py index f9d22a3a36..544bda145c 100644 --- a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py +++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py @@ -65,7 +65,7 @@ from lib.common_config import ( create_route_maps, check_address_types, step, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import ( @@ -114,7 +114,7 @@ def setup_module(mod): """ # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py index 57e8e0d34a..f2e54b24d6 100644 --- a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py +++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py @@ -54,7 +54,7 @@ from lib.common_config import ( create_route_maps, create_prefix_lists, create_route_maps, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import ( @@ -104,7 +104,7 @@ def setup_module(mod): """ # Required linux kernel version for this suite to run. 
- result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py index 2520763bda..003193f108 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py +++ b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py @@ -122,27 +122,39 @@ def test_ebgp_requires_policy(): test_func = functools.partial(_bgp_converge, "r2") success, result = topotest.run_and_expect(test_func, None, count=65, wait=2) - assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(tgen.gears["r2"]) + assert success is True, 'Failed bgp convergence (r2) in "{}"'.format( + tgen.gears["r2"] + ) test_func = functools.partial(_bgp_has_routes, "r2") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(tgen.gears["r2"]) + assert success is True, 'eBGP policy is not working (r2) in "{}"'.format( + tgen.gears["r2"] + ) test_func = functools.partial(_bgp_converge, "r4") success, result = topotest.run_and_expect(test_func, None, count=65, wait=2) - assert success is True, 'Failed bgp convergence (r4) in "{}"'.format(tgen.gears["r4"]) + assert success is True, 'Failed bgp convergence (r4) in "{}"'.format( + tgen.gears["r4"] + ) test_func = functools.partial(_bgp_has_routes, "r4") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(tgen.gears["r4"]) + assert success is False, 'eBGP policy is not working (r4) in "{}"'.format( + tgen.gears["r4"] + ) test_func = functools.partial(_bgp_converge, "r6") success, result = topotest.run_and_expect(test_func, None, count=65, wait=2) - assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(tgen.gears["r6"]) + assert success is True, 'Failed bgp convergence (r6) in "{}"'.format( + tgen.gears["r6"] + ) test_func = functools.partial(_bgp_has_routes, "r6") success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(tgen.gears["r6"]) + assert success is True, 'eBGP policy is not working (r6) in "{}"'.format( + tgen.gears["r6"] + ) if __name__ == "__main__": diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py index 0d99f23ad9..222478f12d 100644 --- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py +++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py @@ -35,7 +35,7 @@ import platform # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -47,27 +47,30 @@ from lib.topolog import logger from mininet.topo import Topo l3mdev_accept = 0 -krel = '' +krel = "" + class BGPEVPNTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) - tgen.add_router('r1') - tgen.add_router('r2') + tgen.add_router("r1") + tgen.add_router("r2") - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r1']) - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r2']) - def setup_module(mod): "Sets up the pytest environment" global l3mdev_accept @@ -79,99 +82,109 @@ def setup_module(mod): router_list = tgen.routers() krel = platform.release() - if topotest.version_cmp(krel, '4.18') < 0: - logger.info('BGP EVPN RT5 NETNS tests will not run (have kernel "{}", but it requires 4.18)'.format(krel)) - return pytest.skip('Skipping BGP EVPN RT5 NETNS Test. Kernel not supported') + if topotest.version_cmp(krel, "4.18") < 0: + logger.info( + 'BGP EVPN RT5 NETNS tests will not run (have kernel "{}", but it requires 4.18)'.format( + krel + ) + ) + return pytest.skip("Skipping BGP EVPN RT5 NETNS Test. Kernel not supported") l3mdev_accept = 1 - logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)) + logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept)) # create VRF vrf-101 on R1 and R2 # create loop101 - cmds_vrflite = ['sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept), - 'ip link add {}-vrf-101 type vrf table 101', - 'ip ru add oif {}-vrf-101 table 101', - 'ip ru add iif {}-vrf-101 table 101', - 'ip link set dev {}-vrf-101 up', - 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept), - 'ip link add loop101 type dummy', - 'ip link set dev loop101 master {}-vrf-101', - 'ip link set dev loop101 up'] - cmds_netns = ['ip netns add {}-vrf-101', - 'ip link add loop101 type dummy', - 'ip link set dev loop101 netns {}-vrf-101', - 'ip netns exec {}-vrf-101 ip link set dev loop101 up'] + cmds_vrflite = [ + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept), + "ip link add {}-vrf-101 type vrf table 101", + "ip ru add oif {}-vrf-101 table 101", + "ip ru add iif {}-vrf-101 table 101", + "ip link set dev {}-vrf-101 up", + "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept), + "ip link add loop101 type dummy", + "ip link set dev loop101 master {}-vrf-101", + "ip link set dev loop101 up", + ] + cmds_netns = [ + "ip netns add {}-vrf-101", + "ip link add loop101 type dummy", + "ip link set dev loop101 netns {}-vrf-101", + "ip netns exec {}-vrf-101 ip link set dev loop101 up", + ] - cmds_r2 = [ # config routing 101 - 'ip link add name bridge-101 up type bridge stp_state 0', - 'ip link set bridge-101 master {}-vrf-101', - 'ip link set dev bridge-101 up', - 'ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r2-eth0 local 192.168.100.41', - 'ip link set dev vxlan-101 master bridge-101', - 'ip link set vxlan-101 up type bridge_slave learning off flood off mcast_flood off'] + 
cmds_r2 = [ # config routing 101 + "ip link add name bridge-101 up type bridge stp_state 0", + "ip link set bridge-101 master {}-vrf-101", + "ip link set dev bridge-101 up", + "ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r2-eth0 local 192.168.100.41", + "ip link set dev vxlan-101 master bridge-101", + "ip link set vxlan-101 up type bridge_slave learning off flood off mcast_flood off", + ] - cmds_r1_netns_method3 = ['ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21', - 'ip link set dev vxlan-{1} netns {0}-vrf-{1}', - 'ip netns exec {0}-vrf-{1} ip li set dev lo up', - 'ip netns exec {0}-vrf-{1} ip link add name bridge-{1} up type bridge stp_state 0', - 'ip netns exec {0}-vrf-{1} ip link set dev vxlan-{1} master bridge-{1}', - 'ip netns exec {0}-vrf-{1} ip link set bridge-{1} up', - 'ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up'] + cmds_r1_netns_method3 = [ + "ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21", + "ip link set dev vxlan-{1} netns {0}-vrf-{1}", + "ip netns exec {0}-vrf-{1} ip li set dev lo up", + "ip netns exec {0}-vrf-{1} ip link add name bridge-{1} up type bridge stp_state 0", + "ip netns exec {0}-vrf-{1} ip link set dev vxlan-{1} master bridge-{1}", + "ip netns exec {0}-vrf-{1} ip link set bridge-{1} up", + "ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up", + ] - router = tgen.gears['r1'] + router = tgen.gears["r1"] for cmd in cmds_netns: - logger.info('cmd to r1: '+cmd); - output = router.run(cmd.format('r1')) - logger.info('result: '+output); + logger.info("cmd to r1: " + cmd) + output = router.run(cmd.format("r1")) + logger.info("result: " + output) - router = tgen.gears['r2'] + router = tgen.gears["r2"] for cmd in cmds_vrflite: - logger.info('cmd to r2: '+cmd.format('r2')); - output = router.run(cmd.format('r2')) - logger.info('result: '+output); + logger.info("cmd to r2: " + cmd.format("r2")) + output = router.run(cmd.format("r2")) + logger.info("result: " + output) for cmd in cmds_r2: - logger.info('cmd to r2: '+cmd.format('r2')); - output = router.run(cmd.format('r2')) - logger.info('result: '+output); + logger.info("cmd to r2: " + cmd.format("r2")) + output = router.run(cmd.format("r2")) + logger.info("result: " + output) - router = tgen.gears['r1'] - bridge_id = '101' + router = tgen.gears["r1"] + bridge_id = "101" for cmd in cmds_r1_netns_method3: - logger.info('cmd to r1: '+cmd.format('r1', bridge_id)); - output = router.run(cmd.format('r1', bridge_id)) - logger.info('result: '+output); - router = tgen.gears['r1'] + logger.info("cmd to r1: " + cmd.format("r1", bridge_id)) + output = router.run(cmd.format("r1", bridge_id)) + logger.info("result: " + output) + router = tgen.gears["r1"] for rname, router in router_list.items(): - if rname == 'r1': + if rname == "r1": router.load_config( TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)), - '--vrfwnetns -o vrf0' + os.path.join(CWD, "{}/zebra.conf".format(rname)), + "--vrfwnetns -o vrf0", ) else: router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. 
tgen.start_router() + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() - cmds_rx_netns = ['ip netns del {}-vrf-101'] - - router = tgen.gears['r1'] + cmds_rx_netns = ["ip netns del {}-vrf-101"] + + router = tgen.gears["r1"] for cmd in cmds_rx_netns: - logger.info('cmd to r1: '+cmd.format('r1')); - output = router.run(cmd.format('r1')) + logger.info("cmd to r1: " + cmd.format("r1")) + output = router.run(cmd.format("r1")) tgen.stop_topology() @@ -183,52 +196,59 @@ def test_protocols_convergence(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - topotest.sleep(4, 'waiting 4 seconds for bgp convergence') + topotest.sleep(4, "waiting 4 seconds for bgp convergence") # Check IPv4/IPv6 routing tables. - output = tgen.gears['r1'].vtysh_cmd('show bgp l2vpn evpn', isjson=False) - logger.info('==== result from show bgp l2vpn evpn') + output = tgen.gears["r1"].vtysh_cmd("show bgp l2vpn evpn", isjson=False) + logger.info("==== result from show bgp l2vpn evpn") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show bgp l2vpn evpn route detail', isjson=False) - logger.info('==== result from show bgp l2vpn evpn route detail') + output = tgen.gears["r1"].vtysh_cmd( + "show bgp l2vpn evpn route detail", isjson=False + ) + logger.info("==== result from show bgp l2vpn evpn route detail") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show bgp vrf r1-vrf-101 ipv4', isjson=False) - logger.info('==== result from show bgp vrf r1-vrf-101 ipv4') + output = tgen.gears["r1"].vtysh_cmd("show bgp vrf r1-vrf-101 ipv4", isjson=False) + logger.info("==== result from show bgp vrf r1-vrf-101 ipv4") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show bgp vrf r1-vrf-101', isjson=False) - logger.info('==== result from show bgp vrf r1-vrf-101 ') + output = tgen.gears["r1"].vtysh_cmd("show bgp vrf r1-vrf-101", isjson=False) + logger.info("==== result from show bgp vrf r1-vrf-101") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show ip route vrf r1-vrf-101', isjson=False) - logger.info('==== result from show ip route vrf r1-vrf-101') + output = tgen.gears["r1"].vtysh_cmd("show ip route vrf r1-vrf-101", isjson=False) + logger.info("==== result from show ip route vrf r1-vrf-101") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show evpn vni detail', isjson=False) - logger.info('==== result from show evpn vni detail') + output = tgen.gears["r1"].vtysh_cmd("show evpn vni detail", isjson=False) + logger.info("==== result from show evpn vni detail") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show evpn next-hops vni all', isjson=False) - logger.info('==== result from show evpn next-hops vni all') + output = tgen.gears["r1"].vtysh_cmd("show evpn next-hops vni all", isjson=False) + logger.info("==== result from show evpn next-hops vni all") logger.info(output) - output = tgen.gears['r1'].vtysh_cmd('show evpn rmac vni all', isjson=False) - logger.info('==== result from show evpn next-hops vni all') + output = tgen.gears["r1"].vtysh_cmd("show evpn rmac vni all", isjson=False) + logger.info("==== result from show evpn rmac vni all") logger.info(output) # Check IPv4 and IPv6 connectivity between r1 and r2 ( routing vxlan evpn) - pingrouter = tgen.gears['r1'] - logger.info('Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)') - output = pingrouter.run('ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000') + pingrouter = tgen.gears["r1"] + logger.info( + "Check Ping IPv4 from
R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)" + ) + output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000") logger.info(output) - if '1000 packets transmitted, 1000 received' not in output: - assertmsg = 'expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok' + if "1000 packets transmitted, 1000 received" not in output: + assertmsg = ( + "expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok" + ) assert 0, assertmsg else: - logger.info('Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK') + logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK") + def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_features/test_bgp_features.py b/tests/topotests/bgp_features/test_bgp_features.py index 4ec060b642..bd092c4340 100644 --- a/tests/topotests/bgp_features/test_bgp_features.py +++ b/tests/topotests/bgp_features/test_bgp_features.py @@ -188,11 +188,15 @@ def test_bgp_shutdown(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65000\" -c \"bgp shutdown message ABCDabcd\"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "bgp shutdown message ABCDabcd"' + ) # Check BGP Summary on local and remote routers for rtrNum in [1, 2, 4]: - logger.info("Checking BGP Summary after shutdown of R1 BGP on router r{}".format(rtrNum)) + logger.info( + "Checking BGP Summary after shutdown of R1 BGP on router r{}".format(rtrNum) + ) router = tgen.gears["r{}".format(rtrNum)] reffile = os.path.join(CWD, "r{}/bgp_shutdown_summary.json".format(rtrNum)) @@ -202,7 +206,9 @@ def test_bgp_shutdown(): topotest.router_json_cmp, router, "show ip bgp summary json", expected ) _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) - assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format(rtrNum) + assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format( + rtrNum + ) assert res is None, assertmsg @@ -218,18 +224,21 @@ def test_bgp_shutdown_message(): for rtrNum in [2, 4]: logger.info("Checking BGP shutdown received on router r{}".format(rtrNum)) - shut_message = tgen.net['r{}'.format(rtrNum)].cmd( - 'tail bgpd.log | grep "NOTIFICATION.*Cease/Administratively Shutdown"') + shut_message = tgen.net["r{}".format(rtrNum)].cmd( + 'tail bgpd.log | grep "NOTIFICATION.*Cease/Administratively Shutdown"' + ) assertmsg = "BGP shutdown message not received on router R{}".format(rtrNum) - assert shut_message != '', assertmsg + assert shut_message != "", assertmsg - m = re.search('.*([0-9]+ bytes[ 0-9a-fA-F]+)', shut_message) + m = re.search(".*([0-9]+ bytes[ 0-9a-fA-F]+)", shut_message) if m: found = m.group(1) else: - found = '' - assertmsg = "Incorrect BGP shutdown message received on router R{}".format(rtrNum) - assert found == '8 bytes 41 42 43 44 61 62 63 64', assertmsg + found = "" + assertmsg = "Incorrect BGP shutdown message received on router R{}".format( + rtrNum + ) + assert found == "8 bytes 41 42 43 44 61 62 63 64", assertmsg # tgen.mininet_cli() @@ -243,11 +252,15 @@ def test_bgp_no_shutdown(): if 
tgen.routers_have_failure(): pytest.skip(tgen.errors) - tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65000\" -c \"no bgp shutdown\"') + tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no bgp shutdown"') # Check BGP Summary on local and remote routers for rtrNum in [1, 2, 4]: - logger.info("Checking BGP Summary after removing bgp shutdown on router r1 on router r{}".format(rtrNum)) + logger.info( + "Checking BGP Summary after removing bgp shutdown on router r1 on router r{}".format( + rtrNum + ) + ) router = tgen.gears["r{}".format(rtrNum)] reffile = os.path.join(CWD, "r{}/bgp_summary.json".format(rtrNum)) @@ -257,7 +270,9 @@ def test_bgp_no_shutdown(): topotest.router_json_cmp, router, "show ip bgp summary json", expected ) _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) - assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format(rtrNum) + assertmsg = "BGP sessions on router R{} are in incorrect state (not up as expected?)".format( + rtrNum + ) assert res is None, assertmsg @@ -303,31 +318,43 @@ def test_bgp_metric_config(): # set metric +12 # ! - tgen.net['r1'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+ - '-c "address-family ipv4 unicast" '+ - '-c "neighbor 192.168.0.2 route-map addmetric-in in" '+ - '-c "neighbor 192.168.0.2 route-map addmetric-out out" '+ - '-c "neighbor 192.168.101.2 route-map setmetric-in in" '+ - '-c "neighbor 192.168.101.2 route-map setmetric-out out" ') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "ip prefix-list net1 seq 10 permit 192.168.101.0/24" '+ - '-c "ip prefix-list net2 seq 20 permit 192.168.1.0/24"') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "route-map setmetric-in permit 10" '+ - '-c "match ip address prefix-list net1" '+ - '-c "set metric 111" '+ - '-c "route-map setmetric-in permit 20"') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "route-map setmetric-out permit 10" '+ - '-c "match ip address prefix-list net2" '+ - '-c "set metric 1011" '+ - '-c "route-map setmetric-out permit 20"') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "route-map addmetric-in permit 10" '+ - '-c "set metric +11"') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "route-map addmetric-out permit 10" '+ - '-c "set metric +12"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" ' + + '-c "address-family ipv4 unicast" ' + + '-c "neighbor 192.168.0.2 route-map addmetric-in in" ' + + '-c "neighbor 192.168.0.2 route-map addmetric-out out" ' + + '-c "neighbor 192.168.101.2 route-map setmetric-in in" ' + + '-c "neighbor 192.168.101.2 route-map setmetric-out out" ' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" ' + + '-c "ip prefix-list net1 seq 10 permit 192.168.101.0/24" ' + + '-c "ip prefix-list net2 seq 20 permit 192.168.1.0/24"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map setmetric-in permit 10" ' + + '-c "match ip address prefix-list net1" ' + + '-c "set metric 111" ' + + '-c "route-map setmetric-in permit 20"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map setmetric-out permit 10" ' + + '-c "match ip address prefix-list net2" ' + + '-c "set metric 1011" ' + + '-c "route-map setmetric-out permit 20"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map addmetric-in permit 10" ' + + '-c "set metric +11"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map addmetric-out permit 10" ' + + '-c "set metric +12"' + ) # # Adding the following configuration to r2: # router bgp 65000 @@ -360,50
+387,72 @@ def test_bgp_metric_config(): # set metric -23 # ! - tgen.net['r2'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+ - '-c "address-family ipv4 unicast" '+ - '-c "neighbor 192.168.0.1 route-map subtractmetric-in in" '+ - '-c "neighbor 192.168.0.1 route-map subtractmetric-out out" '+ - '-c "neighbor 192.168.201.2 route-map setmetric-in in" ' + - '-c "neighbor 192.168.201.2 route-map setmetric-out out" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "ip prefix-list net1 seq 10 permit 192.168.201.0/24" '+ - '-c "ip prefix-list net2 seq 20 permit 192.168.2.0/24" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "route-map setmetric-in permit 10" '+ - '-c "match ip address prefix-list net1" '+ - '-c "set metric 222" '+ - '-c "route-map setmetric-in permit 20"') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "route-map setmetric-out permit 10" '+ - '-c "match ip address prefix-list net2" '+ - '-c "set metric 2022" '+ - '-c "route-map setmetric-out permit 20"') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "route-map subtractmetric-in permit 10" '+ - '-c "set metric -22"') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "route-map subtractmetric-out permit 10" '+ - '-c "set metric -23"') + tgen.net["r2"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" ' + + '-c "address-family ipv4 unicast" ' + + '-c "neighbor 192.168.0.1 route-map subtractmetric-in in" ' + + '-c "neighbor 192.168.0.1 route-map subtractmetric-out out" ' + + '-c "neighbor 192.168.201.2 route-map setmetric-in in" ' + + '-c "neighbor 192.168.201.2 route-map setmetric-out out" ' + ) + tgen.net["r2"].cmd( + 'vtysh -c "conf t" ' + + '-c "ip prefix-list net1 seq 10 permit 192.168.201.0/24" ' + + '-c "ip prefix-list net2 seq 20 permit 192.168.2.0/24" ' + ) + tgen.net["r2"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map setmetric-in permit 10" ' + + '-c "match ip address prefix-list net1" ' + + '-c "set metric 222" ' + + '-c "route-map setmetric-in permit 20"' + ) + tgen.net["r2"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map setmetric-out permit 10" ' + + '-c "match ip address prefix-list net2" ' + + '-c "set metric 2022" ' + + '-c "route-map setmetric-out permit 20"' + ) + tgen.net["r2"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map subtractmetric-in permit 10" ' + + '-c "set metric -22"' + ) + tgen.net["r2"].cmd( + 'vtysh -c "conf t" ' + + '-c "route-map subtractmetric-out permit 10" ' + + '-c "set metric -23"' + ) # Clear IN the bgp neighbors to make sure the route-maps are applied - tgen.net['r1'].cmd('vtysh -c "clear ip bgp 192.168.0.2 in" '+ - '-c "clear ip bgp 192.168.101.2 in"') - tgen.net['r2'].cmd('vtysh -c "clear ip bgp 192.168.0.1 in" '+ - '-c "clear ip bgp 192.168.201.2 in"') + tgen.net["r1"].cmd( + 'vtysh -c "clear ip bgp 192.168.0.2 in" ' + '-c "clear ip bgp 192.168.101.2 in"' + ) + tgen.net["r2"].cmd( + 'vtysh -c "clear ip bgp 192.168.0.1 in" ' + '-c "clear ip bgp 192.168.201.2 in"' + ) # tgen.mininet_cli() # Checking BGP config - should show the bgp metric settings in the route-maps logger.info("Checking BGP configuration for correct 'set metric' values") - setmetric111 = tgen.net['r1'].cmd('vtysh -c "show running" | grep "^ set metric 111"').rstrip() - assertmsg = "'set metric 111' configuration applied to R1, but not visible in configuration" - assert setmetric111 == ' set metric 111', assertmsg + setmetric111 = ( + tgen.net["r1"].cmd('vtysh -c "show running" | grep "^ set metric 111"').rstrip() + ) + assertmsg = ( + "'set metric 111' configuration applied to R1, but not visible in 
configuration" + ) + assert setmetric111 == " set metric 111", assertmsg - setmetric222 = tgen.net['r2'].cmd('vtysh -c "show running" | grep "^ set metric 222"').rstrip() - assertmsg = "'set metric 222' configuration applied to R2, but not visible in configuration" - assert setmetric222 == ' set metric 222', assertmsg + setmetric222 = ( + tgen.net["r2"].cmd('vtysh -c "show running" | grep "^ set metric 222"').rstrip() + ) + assertmsg = ( + "'set metric 222' configuration applied to R2, but not visible in configuration" + ) + assert setmetric222 == " set metric 222", assertmsg def test_bgp_metric_add_config(): @@ -417,9 +466,13 @@ def test_bgp_metric_add_config(): logger.info("Checking BGP configuration for correct 'set metric' ADD value") - setmetricP11 = tgen.net['r1'].cmd('vtysh -c "show running" | grep "^ set metric +11"').rstrip() - assertmsg = "'set metric +11' configuration applied to R1, but not visible in configuration" - assert setmetricP11 == ' set metric +11', assertmsg + setmetricP11 = ( + tgen.net["r1"].cmd('vtysh -c "show running" | grep "^ set metric +11"').rstrip() + ) + assertmsg = ( + "'set metric +11' configuration applied to R1, but not visible in configuration" + ) + assert setmetricP11 == " set metric +11", assertmsg def test_bgp_metric_subtract_config(): @@ -433,9 +486,13 @@ def test_bgp_metric_subtract_config(): logger.info("Checking BGP configuration for correct 'set metric' SUBTRACT value") - setmetricM22 = tgen.net['r2'].cmd('vtysh -c "show running" | grep "^ set metric -22"').rstrip() - assertmsg = "'set metric -22' configuration applied to R2, but not visible in configuration" - assert setmetricM22 == ' set metric -22', assertmsg + setmetricM22 = ( + tgen.net["r2"].cmd('vtysh -c "show running" | grep "^ set metric -22"').rstrip() + ) + assertmsg = ( + "'set metric -22' configuration applied to R2, but not visible in configuration" + ) + assert setmetricM22 == " set metric -22", assertmsg def test_bgp_set_metric(): @@ -478,47 +535,49 @@ def test_bgp_remove_metric_rmaps(): # Remove metric route-maps and relevant comfiguration - tgen.net['r1'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+ - '-c "address-family ipv4 unicast" '+ - '-c "no neighbor 192.168.0.2 route-map addmetric-in in" '+ - '-c "no neighbor 192.168.0.2 route-map addmetric-out out" '+ - '-c "no neighbor 192.168.101.2 route-map setmetric-in in" '+ - '-c "no neighbor 192.168.101.2 route-map setmetric-out out" ') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "no ip prefix-list net1" '+ - '-c "no ip prefix-list net2"') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "no route-map setmetric-in" ') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "no route-map setmetric-out" ') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "no route-map addmetric-in" ') - tgen.net['r1'].cmd('vtysh -c "conf t" '+ - '-c "no route-map addmetric-out" ') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" ' + + '-c "address-family ipv4 unicast" ' + + '-c "no neighbor 192.168.0.2 route-map addmetric-in in" ' + + '-c "no neighbor 192.168.0.2 route-map addmetric-out out" ' + + '-c "no neighbor 192.168.101.2 route-map setmetric-in in" ' + + '-c "no neighbor 192.168.101.2 route-map setmetric-out out" ' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" ' + + '-c "no ip prefix-list net1" ' + + '-c "no ip prefix-list net2"' + ) + tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-in" ') + tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-out" ') + tgen.net["r1"].cmd('vtysh -c 
"conf t" ' + '-c "no route-map addmetric-in" ') + tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-out" ') - tgen.net['r2'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+ - '-c "address-family ipv4 unicast" '+ - '-c "no neighbor 192.168.0.1 route-map subtractmetric-in in" '+ - '-c "no neighbor 192.168.0.1 route-map subtractmetric-out out" '+ - '-c "no neighbor 192.168.201.2 route-map setmetric-in in" ' + - '-c "no neighbor 192.168.201.2 route-map setmetric-out out" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "no ip prefix-list net1" '+ - '-c "no ip prefix-list net2" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "no route-map setmetric-in" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "no route-map setmetric-out" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "no route-map addmetric-in" ') - tgen.net['r2'].cmd('vtysh -c "conf t" '+ - '-c "no route-map addmetric-out" ') + tgen.net["r2"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" ' + + '-c "address-family ipv4 unicast" ' + + '-c "no neighbor 192.168.0.1 route-map subtractmetric-in in" ' + + '-c "no neighbor 192.168.0.1 route-map subtractmetric-out out" ' + + '-c "no neighbor 192.168.201.2 route-map setmetric-in in" ' + + '-c "no neighbor 192.168.201.2 route-map setmetric-out out" ' + ) + tgen.net["r2"].cmd( + 'vtysh -c "conf t" ' + + '-c "no ip prefix-list net1" ' + + '-c "no ip prefix-list net2" ' + ) + tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-in" ') + tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-out" ') + tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-in" ') + tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-out" ') # Clear IN the bgp neighbors to make sure the route-maps are applied - tgen.net['r1'].cmd('vtysh -c "clear ip bgp 192.168.0.2 in" '+ - '-c "clear ip bgp 192.168.101.2 in"') - tgen.net['r2'].cmd('vtysh -c "clear ip bgp 192.168.0.1 in" '+ - '-c "clear ip bgp 192.168.201.2 in"') + tgen.net["r1"].cmd( + 'vtysh -c "clear ip bgp 192.168.0.2 in" ' + '-c "clear ip bgp 192.168.101.2 in"' + ) + tgen.net["r2"].cmd( + 'vtysh -c "clear ip bgp 192.168.0.1 in" ' + '-c "clear ip bgp 192.168.201.2 in"' + ) # tgen.mininet_cli() @@ -534,7 +593,9 @@ def test_bgp_remove_metric_rmaps(): topotest.router_json_cmp, router, "show ip bgp json", expected ) _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) - assertmsg = "BGP routes on router r{} are wrong after removing metric route-maps".format(rtrNum) + assertmsg = "BGP routes on router r{} are wrong after removing metric route-maps".format( + rtrNum + ) assert res is None, assertmsg @@ -549,15 +610,17 @@ def test_bgp_norib(): logger.info("Configuring 'bgp no-rib' on router r1") - tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"bgp no-rib\"') + tgen.net["r1"].cmd('vtysh -c "conf t" -c "bgp no-rib"') # Checking BGP config - should show the "bgp no-rib" under the router bgp section logger.info("Checking BGP configuration for 'bgp no-rib'") - norib_cfg = tgen.net['r1'].cmd('vtysh -c "show running bgpd" | grep "^bgp no-rib"').rstrip() + norib_cfg = ( + tgen.net["r1"].cmd('vtysh -c "show running bgpd" | grep "^bgp no-rib"').rstrip() + ) assertmsg = "'bgp no-rib' configuration applied, but not visible in configuration" - assert norib_cfg == 'bgp no-rib', assertmsg + assert norib_cfg == "bgp no-rib", assertmsg def test_bgp_norib_routes(): @@ -585,7 +648,11 @@ def test_bgp_norib_routes(): # Check BGP Summary on local and remote routers for rtrNum in 
[1, 2, 4]: - logger.info("Checking BGP Summary after 'bgp no-rib' on router r1 on router r{}".format(rtrNum)) + logger.info( + "Checking BGP Summary after 'bgp no-rib' on router r1 on router r{}".format( + rtrNum + ) + ) router = tgen.gears["r{}".format(rtrNum)] reffile = os.path.join(CWD, "r{}/bgp_summary.json".format(rtrNum)) @@ -595,7 +662,9 @@ topotest.router_json_cmp, router, "show ip bgp summary json", expected ) _, res = topotest.run_and_expect(test_func, None, count=30, wait=2) - assertmsg = "BGP sessions on router R{} has incorrect routes after adding 'bgp no-rib on r1'".format(rtrNum) + assertmsg = "BGP sessions on router R{} have incorrect routes after adding 'bgp no-rib on r1'".format( + rtrNum + ) assert res is None, assertmsg # tgen.mininet_cli() @@ -612,15 +681,21 @@ def test_bgp_disable_norib(): logger.info("Configuring 'no bgp no-rib' on router r1") - tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"no bgp no-rib\"') + tgen.net["r1"].cmd('vtysh -c "conf t" -c "no bgp no-rib"') # Checking BGP config - should show the "bgp no-rib" under the router bgp section logger.info("Checking BGP configuration for 'bgp no-rib'") - norib_cfg = tgen.net['r1'].cmd('vtysh -c "show running bgpd" | grep "^ bgp no-rib"').rstrip() + norib_cfg = ( + tgen.net["r1"] + .cmd('vtysh -c "show running bgpd" | grep "^ bgp no-rib"') + .rstrip() + ) - assertmsg = "'no bgp no-rib'configuration applied, but still visible in configuration" - assert norib_cfg == '', assertmsg + assertmsg = ( + "'no bgp no-rib' configuration applied, but still visible in configuration" + ) + assert norib_cfg == "", assertmsg def test_bgp_disable_norib_routes(): @@ -648,7 +723,11 @@ def test_bgp_disable_norib_routes(): # Check BGP Summary on local and remote routers for rtrNum in [1, 2, 4]: - logger.info("Checking BGP Summary after removing the 'bgp no-rib' on router r1 on router r{}".format(rtrNum)) + logger.info( + "Checking BGP Summary after removing the 'bgp no-rib' on router r1 on router r{}".format( + rtrNum + ) + ) router = tgen.gears["r{}".format(rtrNum)] reffile = os.path.join(CWD, "r{}/bgp_summary.json".format(rtrNum)) @@ -658,13 +737,14 @@ topotest.router_json_cmp, router, "show ip bgp summary json", expected ) _, res = topotest.run_and_expect(test_func, None, count=30, wait=2) - assertmsg = "BGP sessions on router R{} has incorrect routes after removing 'bgp no-rib on r1'".format(rtrNum) + assertmsg = "BGP sessions on router R{} have incorrect routes after removing 'bgp no-rib on r1'".format( + rtrNum + ) assert res is None, assertmsg # tgen.mininet_cli() - if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py b/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py index 7e6bfc8b2b..a772a2aab1 100644 --- a/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py +++ b/tests/topotests/bgp_flowspec/test_bgp_flowspec_topo.py @@ -200,6 +200,7 @@ def test_bgp_flowspec(): else: logger.info("Check BGP FS entry for 3::3 with redirect IP OK") + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py index fdbd317093..18d2ac59d2 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py @@
-135,7 +135,7 @@ from lib.common_config import ( kill_mininet_routers_process, get_frr_ipv6_linklocal, create_route_maps, - required_linux_kernel_version + required_linux_kernel_version, ) # Reading the data from JSON File for topology and configuration creation @@ -188,7 +188,7 @@ def setup_module(mod): global ADDR_TYPES # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py index e1ec0ea81b..da1a47cd29 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py @@ -135,7 +135,7 @@ from lib.common_config import ( kill_mininet_routers_process, get_frr_ipv6_linklocal, create_route_maps, - required_linux_kernel_version + required_linux_kernel_version, ) # Reading the data from JSON File for topology and configuration creation @@ -185,7 +185,7 @@ def setup_module(mod): """ # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_gshut/test_bgp_gshut.py b/tests/topotests/bgp_gshut/test_bgp_gshut.py index 7f8cf17a5c..fe945a4565 100644 --- a/tests/topotests/bgp_gshut/test_bgp_gshut.py +++ b/tests/topotests/bgp_gshut/test_bgp_gshut.py @@ -99,12 +99,14 @@ class TemplateTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r5"]) + def _run_cmd_and_check(router, cmd, results_file, retries=100, intvl=0.5): json_file = "{}/{}".format(CWD, results_file) expected = json.loads(open(json_file).read()) test_func = partial(topotest.router_json_cmp, router, cmd, expected) return topotest.run_and_expect(test_func, None, retries, intvl) + def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) tgen.start_topology() @@ -134,12 +136,14 @@ def setup_module(mod): tgen.start_router() # Basic peering test to see if things are ok - _, result = _run_cmd_and_check(r2, 'show ip bgp summary json', 'r2/bgp_sum_1.json') - assertmsg = 'R2: Basic sanity test after init failed -- global peerings not up' + _, result = _run_cmd_and_check(r2, "show ip bgp summary json", "r2/bgp_sum_1.json") + assertmsg = "R2: Basic sanity test after init failed -- global peerings not up" assert result is None, assertmsg - _, result = _run_cmd_and_check(r2, 'show ip bgp vrf vrf1 summary json', 'r2/bgp_sum_2.json') - assertmsg = 'R2: Basic sanity test after init failed -- VRF peerings not up' + _, result = _run_cmd_and_check( + r2, "show ip bgp vrf vrf1 summary json", "r2/bgp_sum_2.json" + ) + assertmsg = "R2: Basic sanity test after init failed -- VRF peerings not up" assert result is None, assertmsg @@ -160,80 +164,104 @@ def test_bgp_gshut(): r4 = tgen.gears["r4"] r5 = tgen.gears["r5"] - # Verify initial route states - logger.info('\nVerify initial route states') + logger.info("\nVerify initial route states") - _, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json') - assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json" + ) + 
assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json') - assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json" + ) + assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_1.json') - assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_1.json" + ) + assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - logger.info('\nInitial route states are as expected') + logger.info("\nInitial route states are as expected") - - #"Test #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers" - logger.info('\nTest #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers') + # "Test #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers" + logger.info( + "\nTest #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers" + ) r2.vtysh_cmd( """ configure terminal bgp graceful-shutdown """ - ) + ) # R1, R3 and R5 should see routes from R2 with GSHUT. In addition, # R1 should see LOCAL_PREF of 0 - _, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_2.json') - assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_2.json" + ) + assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_2.json') - assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_2.json" + ) + assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_2.json') - assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_2.json" + ) + assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - logger.info('\nTest #1: Successful, routes have GSHUT and/or LPREF of 0 as expected') + logger.info( + "\nTest #1: Successful, routes have GSHUT and/or LPREF of 0 as expected" + ) - - #"Test #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers" - logger.info('\nTest #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers') + # "Test #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers" + logger.info( + "\nTest #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers" + ) r2.vtysh_cmd( """ configure terminal no bgp graceful-shutdown """ - ) + ) # R1, R3 and R5 should see routes from R2 with their original attributes - _, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json') - assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r1, "show ip bgp 13.1.1.1/32 json", 
"r1/bgp_route_1.json" + ) + assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json') - assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json" + ) + assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_1.json') - assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_1.json" + ) + assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - logger.info('\nTest #2: Successful, routes have their original attributes with default LPREF and without GSHUT') + logger.info( + "\nTest #2: Successful, routes have their original attributes with default LPREF and without GSHUT" + ) - - #"Test #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers" - logger.info('\nTest #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers') + # "Test #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers" + logger.info( + "\nTest #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers" + ) r2.vtysh_cmd( """ @@ -241,44 +269,56 @@ def test_bgp_gshut(): router bgp 65001 vrf vrf1 bgp graceful-shutdown """ - ) + ) # R1 and R3 should see no change to their routes - _, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json') - assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json" + ) + assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json') - assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json" + ) + assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg # R5 should see routes from R2 with GSHUT. 
- _, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_2.json') - assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_2.json" + ) + assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - logger.info('\nTest #3: Successful, only VRF peers like R5 see routes with GSHUT') + logger.info("\nTest #3: Successful, only VRF peers like R5 see routes with GSHUT") - - #"Test #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1" - logger.info('\nTest #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1') + # "Test #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1" + logger.info( + "\nTest #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1" + ) ret = r2.vtysh_cmd( """ configure terminal bgp graceful-shutdown """ - ) + ) # This should fail - assertmsg = 'R2: BGP-wide graceful-shutdown config not rejected even though it is enabled in VRF1' - assert re.search("global graceful-shutdown not permitted", ret) is not None, assertmsg + assertmsg = "R2: BGP-wide graceful-shutdown config not rejected even though it is enabled in VRF1" + assert ( + re.search("global graceful-shutdown not permitted", ret) is not None + ), assertmsg - logger.info('\nTest #4: Successful, BGP-wide graceful-shutdown rejected as it is enabled in VRF') + logger.info( + "\nTest #4: Successful, BGP-wide graceful-shutdown rejected as it is enabled in VRF" + ) - - #"Test #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers" - logger.info('\nTest #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers') + # "Test #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers" + logger.info( + "\nTest #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers" + ) r2.vtysh_cmd( """ @@ -286,28 +326,35 @@ def test_bgp_gshut(): router bgp 65001 vrf vrf1 no bgp graceful-shutdown """ - ) + ) # R1 and R3 should see no change to their routes - _, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json') - assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json" + ) + assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg - _, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json') - assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json" + ) + assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg # R5 should see routes from R2 with original attributes. 
- _, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_1.json') - assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params' + _, result = _run_cmd_and_check( + r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_1.json" + ) + assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params" assert result is None, assertmsg + logger.info( + "\nTest #5: Successful, routes have their original attributes with default LPREF and without GSHUT" + ) - logger.info('\nTest #5: Successful, routes have their original attributes with default LPREF and without GSHUT') + # tgen.mininet_cli() - #tgen.mininet_cli() - if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py index f553513b9c..6f5cfcf8d6 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py @@ -306,8 +306,13 @@ want_r1_remote_cust1_routes = [ {"p": "99.0.0.4/32", "n": "4.4.4.4"}, ] bgpribRequireUnicastRoutes( - "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf (2)", want_r1_remote_cust1_routes - , debug=False) + "r1", + "ipv4", + "r1-cust1", + "Customer 1 routes in r1 vrf (2)", + want_r1_remote_cust1_routes, + debug=False, +) want_r3_remote_cust1_routes = [ {"p": "5.1.0.0/24", "n": "1.1.1.1", "bp": True}, @@ -329,8 +334,13 @@ want_r3_remote_cust1_routes = [ {"p": "99.0.0.4/32", "n": "4.4.4.4", "bp": True}, ] bgpribRequireUnicastRoutes( - "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf (2)", want_r3_remote_cust1_routes - , debug=False) + "r3", + "ipv4", + "r3-cust1", + "Customer 1 routes in r3 vrf (2)", + want_r3_remote_cust1_routes, + debug=False, +) want_r4_remote_cust1_routes = [ {"p": "5.1.0.0/24", "n": "1.1.1.1", "bp": True}, @@ -351,8 +361,13 @@ want_r4_remote_cust1_routes = [ {"p": "99.0.0.4/32", "n": "192.168.2.2", "bp": True}, ] bgpribRequireUnicastRoutes( - "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf (2)", want_r4_remote_cust1_routes - , debug=False) + "r4", + "ipv4", + "r4-cust1", + "Customer 1 routes in r4 vrf (2)", + want_r4_remote_cust1_routes, + debug=False, +) want_r4_remote_cust2_routes = [ {"p": "5.1.0.0/24", "n": "1.1.1.1", "bp": True}, @@ -373,8 +388,13 @@ want_r4_remote_cust2_routes = [ {"p": "99.0.0.4/32", "n": "192.168.2.2", "bp": True}, ] bgpribRequireUnicastRoutes( - "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf (2)", want_r4_remote_cust2_routes - , debug=False) + "r4", + "ipv4", + "r4-cust2", + "Customer 2 routes in r4 vrf (2)", + want_r4_remote_cust2_routes, + debug=False, +) ######################################################################### @@ -402,7 +422,9 @@ want = [ {"p": "6.0.1.0/24", "n": "99.0.0.1", "bp": True}, {"p": "6.0.2.0/24", "n": "99.0.0.1", "bp": True}, ] -bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes from remote", want, debug=False) +bgpribRequireUnicastRoutes( + "ce1", "ipv4", "", "Cust 1 routes from remote", want, debug=False +) luCommand( "ce2", @@ -425,7 +447,9 @@ want = [ {"p": "6.0.1.0/24", "n": "99.0.0.2", "bp": True}, {"p": "6.0.2.0/24", "n": "99.0.0.2", "bp": True}, ] -bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 1 routes from remote", want, debug=False) +bgpribRequireUnicastRoutes( + "ce2", "ipv4", "", "Cust 1 routes from remote", want, debug=False +) # human readable output for debugging luCommand("r4", 'vtysh -c "show bgp vrf r4-cust1 
ipv4 uni"') @@ -453,7 +477,9 @@ want = [ {"p": "6.0.1.0/24", "n": "99.0.0.3", "bp": True}, {"p": "6.0.2.0/24", "n": "99.0.0.3", "bp": True}, ] -bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 1 routes from remote", want, debug=False) +bgpribRequireUnicastRoutes( + "ce3", "ipv4", "", "Cust 1 routes from remote", want, debug=False +) luCommand( "ce4", @@ -477,58 +503,91 @@ bgpribRequireUnicastRoutes( "ce4", "ipv4", "ce4-cust2", "Cust 2 routes from remote", want, debug=False ) -#verify details of exported/imported routes -luCommand("ce1",'vtysh -c "show bgp ipv4 uni 6.0.1.0"', - "1 available.*192.168.1.1.*99.0.0.1.*Community: 0:67.*Extended Community: RT:89:123.*Large Community: 12:34:56", - "pass", "Redundant route 1 details") -luCommand("ce2",'vtysh -c "show bgp ipv4 uni 6.0.1.0"', - "2 available, best .*192.168.1.1.* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" + - ".* Origin IGP, metric 98, localpref 123, valid, internal" + - ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56", - ".* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2" + - ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56", - "pass", "Redundant route 1 details") -luCommand("ce3",'vtysh -c "show bgp ipv4 uni 6.0.1.0"', - "2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3" + - ".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56" + - ".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" + - ".* Origin IGP, metric 98, localpref 123, valid, internal" + - ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56", - "pass", "Redundant route 1 details") -luCommand("ce4",'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.1.0"', - "2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1" + - ".* Origin IGP, metric 98, localpref 123, valid, internal" + - ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56" + - ".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4" + - ".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56", - "pass", "Redundant route 1 details") +# verify details of exported/imported routes +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni 6.0.1.0"', + "1 available.*192.168.1.1.*99.0.0.1.*Community: 0:67.*Extended Community: RT:89:123.*Large Community: 12:34:56", + "pass", + "Redundant route 1 details", +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni 6.0.1.0"', + "2 available, best .*192.168.1.1.* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" + + ".* Origin IGP, metric 98, localpref 123, valid, internal" + + ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56", + ".* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2" + + ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" + + ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56", + "pass", + "Redundant route 1 details", +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni 6.0.1.0"', + "2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3" + + ".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best 
.Weight" + + ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56" + + ".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" + + ".* Origin IGP, metric 98, localpref 123, valid, internal" + + ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56", + "pass", + "Redundant route 1 details", +) +luCommand( + "ce4", + 'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.1.0"', + "2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1" + + ".* Origin IGP, metric 98, localpref 123, valid, internal" + + ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56" + + ".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4" + + ".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight" + + ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56", + "pass", + "Redundant route 1 details", +) -luCommand("ce1",'vtysh -c "show bgp ipv4 uni 6.0.2.0"', - "1 available, best .*192.168.1.1.* Local.* 99.0.0.1 from 0.0.0.0 .99.0.0.1" + - ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:11", - "pass", "Redundant route 2 details") -luCommand("ce2",'vtysh -c "show bgp ipv4 uni 6.0.2.0"', "1 available, best .*192.168.1.1.* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2" + - ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:12", - "pass", "Redundant route 2 details") -luCommand("ce3",'vtysh -c "show bgp ipv4 uni 6.0.2.0"', - "2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3" + - ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:13" + - ".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" + - ".* Origin IGP, metric 100, localpref 100, valid, internal" + - ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:14", - "pass", "Redundant route 2 details") -luCommand("ce4",'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.2.0"', - "2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1" + - ".* Origin IGP, metric 100, localpref 100, valid, internal" + - ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:13" + - ".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4" + - ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" + - ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:14", - "pass", "Redundant route 2 details") -#done +luCommand( + "ce1", + 'vtysh -c "show bgp ipv4 uni 6.0.2.0"', + "1 available, best .*192.168.1.1.* Local.* 99.0.0.1 from 0.0.0.0 .99.0.0.1" + + ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received" + + ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:11", + "pass", + "Redundant route 2 details", +) +luCommand( + "ce2", + 'vtysh -c "show bgp ipv4 uni 6.0.2.0"', + "1 available, best .*192.168.1.1.* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2" + + ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received" + + ".* Community: 0:67.* Extended Community: RT:89:123.* 
Large Community: 12:34:12", + "pass", + "Redundant route 2 details", +) +luCommand( + "ce3", + 'vtysh -c "show bgp ipv4 uni 6.0.2.0"', + "2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3" + + ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" + + ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:13" + + ".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" + + ".* Origin IGP, metric 100, localpref 100, valid, internal" + + ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:14", + "pass", + "Redundant route 2 details", +) +luCommand( + "ce4", + 'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.2.0"', + "2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1" + + ".* Origin IGP, metric 100, localpref 100, valid, internal" + + ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:13" + + ".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4" + + ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" + + ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:14", + "pass", + "Redundant route 2 details", +) +# done diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py index dc06b7131a..40489f438f 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py @@ -67,7 +67,7 @@ from lib.common_config import ( verify_bgp_community, step, check_address_types, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify @@ -144,7 +144,7 @@ def setup_module(mod): * `mod`: module name """ # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py index bb88e47415..9c0355a3e9 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py @@ -91,7 +91,7 @@ from lib.common_config import ( verify_route_maps, create_static_routes, check_address_types, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify @@ -135,7 +135,7 @@ def setup_module(mod): """ # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py index dff69e3a27..f09ff20651 100644 --- a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py +++ b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py @@ -35,7 +35,7 @@ import json # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -63,8 +63,10 @@ this scenario, the servers are also routers as they have to announce anycast IP (VIP) addresses via BGP. """ + class BgpLinkBwTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -73,45 +75,46 @@ class BgpLinkBwTopo(Topo): # and 4 servers routers = {} for i in range(1, 11): - routers[i] = tgen.add_router('r{}'.format(i)) + routers[i] = tgen.add_router("r{}".format(i)) # Create 13 "switches" - to interconnect the above routers switches = {} for i in range(1, 14): - switches[i] = tgen.add_switch('s{}'.format(i)) + switches[i] = tgen.add_switch("s{}".format(i)) # Interconnect R1 (super-spine) to R2 and R3 (the two spines) - switches[1].add_link(tgen.gears['r1']) - switches[1].add_link(tgen.gears['r2']) - switches[2].add_link(tgen.gears['r1']) - switches[2].add_link(tgen.gears['r3']) + switches[1].add_link(tgen.gears["r1"]) + switches[1].add_link(tgen.gears["r2"]) + switches[2].add_link(tgen.gears["r1"]) + switches[2].add_link(tgen.gears["r3"]) # Interconnect R2 (spine in pod-1) to R4 and R5 (the associated # leaf switches) - switches[3].add_link(tgen.gears['r2']) - switches[3].add_link(tgen.gears['r4']) - switches[4].add_link(tgen.gears['r2']) - switches[4].add_link(tgen.gears['r5']) + switches[3].add_link(tgen.gears["r2"]) + switches[3].add_link(tgen.gears["r4"]) + switches[4].add_link(tgen.gears["r2"]) + switches[4].add_link(tgen.gears["r5"]) # Interconnect R3 (spine in pod-2) to R6 (associated leaf) - switches[5].add_link(tgen.gears['r3']) - switches[5].add_link(tgen.gears['r6']) + switches[5].add_link(tgen.gears["r3"]) + switches[5].add_link(tgen.gears["r6"]) # Interconnect leaf switches to servers - switches[6].add_link(tgen.gears['r4']) - switches[6].add_link(tgen.gears['r7']) - switches[7].add_link(tgen.gears['r4']) - switches[7].add_link(tgen.gears['r8']) - switches[8].add_link(tgen.gears['r5']) - switches[8].add_link(tgen.gears['r9']) - switches[9].add_link(tgen.gears['r6']) - switches[9].add_link(tgen.gears['r10']) + switches[6].add_link(tgen.gears["r4"]) + switches[6].add_link(tgen.gears["r7"]) + switches[7].add_link(tgen.gears["r4"]) + switches[7].add_link(tgen.gears["r8"]) + switches[8].add_link(tgen.gears["r5"]) + switches[8].add_link(tgen.gears["r9"]) + switches[9].add_link(tgen.gears["r6"]) + switches[9].add_link(tgen.gears["r10"]) # Create empty networks for the servers - switches[10].add_link(tgen.gears['r7']) - switches[11].add_link(tgen.gears['r8']) - switches[12].add_link(tgen.gears['r9']) - switches[13].add_link(tgen.gears['r10']) + switches[10].add_link(tgen.gears["r7"]) + switches[11].add_link(tgen.gears["r8"]) + switches[12].add_link(tgen.gears["r9"]) + switches[13].add_link(tgen.gears["r10"]) + def setup_module(mod): "Sets up the pytest environment" @@ -121,395 +124,454 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_BGP, - os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) # Initialize all routers. 
tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() tgen.stop_topology() + def test_bgp_linkbw_adv(): "Test #1: Test BGP link-bandwidth advertisement based on number of multipaths" - logger.info('\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths') + logger.info( + "\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths" + ) tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] - r2 = tgen.gears['r2'] + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] # Configure anycast IP on server r7 - logger.info('Configure anycast IP on server r7') + logger.info("Configure anycast IP on server r7") - tgen.net['r7'].cmd('ip addr add 198.10.1.1/32 dev r7-eth1') + tgen.net["r7"].cmd("ip addr add 198.10.1.1/32 dev r7-eth1") # Check on spine router r2 for link-bw advertisement by leaf router r4 - logger.info('Check on spine router r2 for link-bw advertisement by leaf router r4') + logger.info("Check on spine router r2 for link-bw advertisement by leaf router r4") - json_file = '{}/r2/bgp-route-1.json'.format(CWD) + json_file = "{}/r2/bgp-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r2, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on spine router r2' + assertmsg = "JSON output mismatch on spine router r2" assert result is None, assertmsg # Check on spine router r2 that default weight is used as there is no multipath - logger.info('Check on spine router r2 that default weight is used as there is no multipath') + logger.info( + "Check on spine router r2 that default weight is used as there is no multipath" + ) - json_file = '{}/r2/ip-route-1.json'.format(CWD) + json_file = "{}/r2/ip-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r2, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r2, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on spine router r2' + assertmsg = "JSON output mismatch on spine router r2" assert result is None, assertmsg # Check on super-spine router r1 that link-bw has been propagated by spine router r2 - logger.info('Check on super-spine router r1 that link-bw has been propagated by spine router r2') + logger.info( + "Check on super-spine router r1 that link-bw has been propagated by spine router r2" + ) - json_file = '{}/r1/bgp-route-1.json'.format(CWD) + json_file = "{}/r1/bgp-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg + def 
test_bgp_cumul_linkbw(): "Test #2: Test cumulative link-bandwidth propagation" - logger.info('\nTest #2: Test cumulative link-bandwidth propagation') + logger.info("\nTest #2: Test cumulative link-bandwidth propagation") tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] - r2 = tgen.gears['r2'] - r4 = tgen.gears['r4'] + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] + r4 = tgen.gears["r4"] # Configure anycast IP on additional server r8 - logger.info('Configure anycast IP on server r8') + logger.info("Configure anycast IP on server r8") - tgen.net['r8'].cmd('ip addr add 198.10.1.1/32 dev r8-eth1') + tgen.net["r8"].cmd("ip addr add 198.10.1.1/32 dev r8-eth1") # Check multipath on leaf router r4 - logger.info('Check multipath on leaf router r4') + logger.info("Check multipath on leaf router r4") - json_file = '{}/r4/bgp-route-1.json'.format(CWD) + json_file = "{}/r4/bgp-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r4, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r4, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on leaf router r4' + assertmsg = "JSON output mismatch on leaf router r4" assert result is None, assertmsg # Check regular ECMP is in effect on leaf router r4 - logger.info('Check regular ECMP is in effect on leaf router r4') + logger.info("Check regular ECMP is in effect on leaf router r4") - json_file = '{}/r4/ip-route-1.json'.format(CWD) + json_file = "{}/r4/ip-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r4, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r4, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on leaf router r4' + assertmsg = "JSON output mismatch on leaf router r4" assert result is None, assertmsg # Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths - logger.info('Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths') + logger.info( + "Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths" + ) - json_file = '{}/r2/bgp-route-2.json'.format(CWD) + json_file = "{}/r2/bgp-route-2.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r2, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on spine router r2' + assertmsg = "JSON output mismatch on spine router r2" assert result is None, assertmsg + def test_weighted_ecmp(): "Test #3: Test weighted ECMP - multipath with next hop weights" - logger.info('\nTest #3: Test weighted ECMP - multipath with next hop weights') + logger.info("\nTest #3: Test weighted ECMP - multipath with next hop weights") tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + 
pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] - r2 = tgen.gears['r2'] + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] # Configure anycast IP on additional server r9 - logger.info('Configure anycast IP on server r9') + logger.info("Configure anycast IP on server r9") - tgen.net['r9'].cmd('ip addr add 198.10.1.1/32 dev r9-eth1') + tgen.net["r9"].cmd("ip addr add 198.10.1.1/32 dev r9-eth1") # Check multipath on spine router r2 - logger.info('Check multipath on spine router r2') - json_file = '{}/r2/bgp-route-3.json'.format(CWD) + logger.info("Check multipath on spine router r2") + json_file = "{}/r2/bgp-route-3.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r2, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on spine router r2' + assertmsg = "JSON output mismatch on spine router r2" assert result is None, assertmsg # Check weighted ECMP is in effect on the spine router r2 - logger.info('Check weighted ECMP is in effect on the spine router r2') + logger.info("Check weighted ECMP is in effect on the spine router r2") - json_file = '{}/r2/ip-route-2.json'.format(CWD) + json_file = "{}/r2/ip-route-2.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r2, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r2, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on spine router r2' + assertmsg = "JSON output mismatch on spine router r2" assert result is None, assertmsg # Configure anycast IP on additional server r10 - logger.info('Configure anycast IP on server r10') + logger.info("Configure anycast IP on server r10") - tgen.net['r10'].cmd('ip addr add 198.10.1.1/32 dev r10-eth1') + tgen.net["r10"].cmd("ip addr add 198.10.1.1/32 dev r10-eth1") # Check multipath on super-spine router r1 - logger.info('Check multipath on super-spine router r1') - json_file = '{}/r1/bgp-route-2.json'.format(CWD) + logger.info("Check multipath on super-spine router r1") + json_file = "{}/r1/bgp-route-2.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg # Check weighted ECMP is in effect on the super-spine router r1 - logger.info('Check weighted ECMP is in effect on the super-spine router r1') - json_file = '{}/r1/ip-route-1.json'.format(CWD) + logger.info("Check weighted ECMP is in effect on the super-spine router r1") + json_file = "{}/r1/ip-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected + ) _, result = 
topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg + def test_weighted_ecmp_link_flap(): "Test #4: Test weighted ECMP rebalancing upon change (link flap)" - logger.info('\nTest #4: Test weighted ECMP rebalancing upon change (link flap)') + logger.info("\nTest #4: Test weighted ECMP rebalancing upon change (link flap)") tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] - r2 = tgen.gears['r2'] + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] # Bring down link on server r9 - logger.info('Bring down link on server r9') + logger.info("Bring down link on server r9") - tgen.net['r9'].cmd('ip link set dev r9-eth1 down') + tgen.net["r9"].cmd("ip link set dev r9-eth1 down") # Check spine router r2 has only one path - logger.info('Check spine router r2 has only one path') + logger.info("Check spine router r2 has only one path") - json_file = '{}/r2/ip-route-3.json'.format(CWD) + json_file = "{}/r2/ip-route-3.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r2, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r2, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on spine router r2' + assertmsg = "JSON output mismatch on spine router r2" assert result is None, assertmsg # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1 - logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1') + logger.info( + "Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1" + ) - json_file = '{}/r1/bgp-route-3.json'.format(CWD) + json_file = "{}/r1/bgp-route-3.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg - json_file = '{}/r1/ip-route-2.json'.format(CWD) + json_file = "{}/r1/ip-route-2.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg # Bring up link on server r9 - logger.info('Bring up link on server r9') + logger.info("Bring up link on server r9") - tgen.net['r9'].cmd('ip link set dev r9-eth1 up') + tgen.net["r9"].cmd("ip link set dev r9-eth1 up") # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1 - logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1') 
+ logger.info( + "Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1" + ) - json_file = '{}/r1/bgp-route-2.json'.format(CWD) + json_file = "{}/r1/bgp-route-2.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg - json_file = '{}/r1/ip-route-1.json'.format(CWD) + json_file = "{}/r1/ip-route-1.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg + def test_weighted_ecmp_second_anycast_ip(): "Test #5: Test weighted ECMP for a second anycast IP" - logger.info('\nTest #5: Test weighted ECMP for a second anycast IP') + logger.info("\nTest #5: Test weighted ECMP for a second anycast IP") tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] - r2 = tgen.gears['r2'] + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] # Configure anycast IP on additional server r7, r9 and r10 - logger.info('Configure anycast IP on server r7, r9 and r10') + logger.info("Configure anycast IP on server r7, r9 and r10") - tgen.net['r7'].cmd('ip addr add 198.10.1.11/32 dev r7-eth1') - tgen.net['r9'].cmd('ip addr add 198.10.1.11/32 dev r9-eth1') - tgen.net['r10'].cmd('ip addr add 198.10.1.11/32 dev r10-eth1') + tgen.net["r7"].cmd("ip addr add 198.10.1.11/32 dev r7-eth1") + tgen.net["r9"].cmd("ip addr add 198.10.1.11/32 dev r9-eth1") + tgen.net["r10"].cmd("ip addr add 198.10.1.11/32 dev r10-eth1") # Check link-bandwidth and weighted ECMP on super-spine router r1 - logger.info('Check link-bandwidth and weighted ECMP on super-spine router r1') + logger.info("Check link-bandwidth and weighted ECMP on super-spine router r1") - json_file = '{}/r1/bgp-route-4.json'.format(CWD) + json_file = "{}/r1/bgp-route-4.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show bgp ipv4 uni 198.10.1.11/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.11/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg - json_file = '{}/r1/ip-route-3.json'.format(CWD) + json_file = "{}/r1/ip-route-3.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.11/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected + ) _, result = 
topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg + def test_paths_with_and_without_linkbw(): "Test #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP" - logger.info('\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP') + logger.info( + "\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP" + ) tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] # Configure leaf router r6 to not advertise any link-bandwidth - logger.info('Configure leaf router r6 to not advertise any link-bandwidth') + logger.info("Configure leaf router r6 to not advertise any link-bandwidth") - tgen.net['r6'].cmd('vtysh -c \"conf t\" -c \"router bgp 65303\" -c \"address-family ipv4 unicast\" -c \"no neighbor 11.1.3.1 route-map anycast_ip out\"') + tgen.net["r6"].cmd( + 'vtysh -c "conf t" -c "router bgp 65303" -c "address-family ipv4 unicast" -c "no neighbor 11.1.3.1 route-map anycast_ip out"' + ) # Check link-bandwidth change on super-spine router r1 - logger.info('Check link-bandwidth change on super-spine router r1') + logger.info("Check link-bandwidth change on super-spine router r1") - json_file = '{}/r1/bgp-route-5.json'.format(CWD) + json_file = "{}/r1/bgp-route-5.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg # Check super-spine router r1 resorts to regular ECMP - logger.info('Check super-spine router r1 resorts to regular ECMP') + logger.info("Check super-spine router r1 resorts to regular ECMP") - json_file = '{}/r1/ip-route-4.json'.format(CWD) + json_file = "{}/r1/ip-route-4.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg - json_file = '{}/r1/ip-route-5.json'.format(CWD) + json_file = "{}/r1/ip-route-5.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.11/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg + def test_linkbw_handling_options(): "Test #7: Test different options for processing 
link-bandwidth on the receiver" - logger.info('\nTest #7: Test different options for processing link-bandwidth on the receiver') + logger.info( + "\nTest #7: Test different options for processing link-bandwidth on the receiver" + ) tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") - r1 = tgen.gears['r1'] + r1 = tgen.gears["r1"] # Configure super-spine r1 to skip multipaths without link-bandwidth - logger.info('Configure super-spine r1 to skip multipaths without link-bandwidth') + logger.info("Configure super-spine r1 to skip multipaths without link-bandwidth") - tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth skip-missing\"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65101" -c "bgp bestpath bandwidth skip-missing"' + ) # Check super-spine router r1 resorts to only one path as other path is skipped - logger.info('Check super-spine router r1 resorts to only one path as other path is skipped') + logger.info( + "Check super-spine router r1 resorts to only one path as other path is skipped" + ) - json_file = '{}/r1/ip-route-6.json'.format(CWD) + json_file = "{}/r1/ip-route-6.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg - json_file = '{}/r1/ip-route-7.json'.format(CWD) + json_file = "{}/r1/ip-route-7.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.11/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg # Configure super-spine r1 to use default-weight for multipaths without link-bandwidth - logger.info('Configure super-spine r1 to use default-weight for multipaths without link-bandwidth') + logger.info( + "Configure super-spine r1 to use default-weight for multipaths without link-bandwidth" + ) - tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth default-weight-for-missing\"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65101" -c "bgp bestpath bandwidth default-weight-for-missing"' + ) # Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth - logger.info('Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth') + logger.info( + "Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth" + ) - json_file = '{}/r1/ip-route-8.json'.format(CWD) + json_file = "{}/r1/ip-route-8.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.1/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected + ) _, result = 
topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg - json_file = '{}/r1/ip-route-9.json'.format(CWD) + json_file = "{}/r1/ip-route-9.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - r1, 'show ip route 198.10.1.11/32 json', expected) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5) - assertmsg = 'JSON output mismatch on super-spine router r1' + assertmsg = "JSON output mismatch on super-spine router r1" assert result is None, assertmsg -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py index c15b88d371..cf6b7cc53f 100644 --- a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py +++ b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py @@ -132,7 +132,7 @@ from lib.common_config import ( create_bgp_community_lists, check_router_status, apply_raw_config, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger @@ -211,7 +211,7 @@ def setup_module(mod): * `mod`: module name """ # Required linux kernel version for this suite to run. - result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") @@ -693,10 +693,12 @@ def test_static_routes_associated_to_specific_vrfs_p0(request): ) step( - "Verify that static routes 1.x.x.x/32 and 1::x/128 appear " "in VRF BLUE_A table" + "Verify that static routes 1.x.x.x/32 and 1::x/128 appear " + "in VRF BLUE_A table" ) step( - "Verify that static routes 2.x.x.x/32 and 2::x/128 appear " "in VRF BLUE_B table" + "Verify that static routes 2.x.x.x/32 and 2::x/128 appear " + "in VRF BLUE_B table" ) for addr_type in ADDR_TYPES: diff --git a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py index bb13d54019..cafe758209 100644 --- a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py +++ b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py @@ -78,7 +78,7 @@ from lib.common_config import ( get_frr_ipv6_linklocal, check_router_status, apply_raw_config, - required_linux_kernel_version + required_linux_kernel_version, ) from lib.topolog import logger @@ -143,7 +143,7 @@ def setup_module(mod): * `mod`: module name """ # Required linux kernel version for this suite to run. 
- result = required_linux_kernel_version('4.15') + result = required_linux_kernel_version("4.15") if result is not True: pytest.skip("Kernel requirements are not met") diff --git a/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py b/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py index fef6eb71dc..3af944473d 100644 --- a/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py +++ b/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py @@ -91,7 +91,7 @@ jsonFile = "{}/bgp_recursive_route_ebgp_multi_hop.json".format(CWD) try: with open(jsonFile, "r") as topoJson: topo = json.load(topoJson) -except IOError : +except IOError: logger.info("Could not read file:", jsonFile) # Global variables @@ -284,7 +284,9 @@ def test_recursive_routes_iBGP_peer_p1(request): input_dict_4, next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0], ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step( "Configure a static routes for next hop IP on R2 via multiple" @@ -317,7 +319,9 @@ def test_recursive_routes_iBGP_peer_p1(request): } } result = create_static_routes(tgen, input_dict_3) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("verify if redistributed routes are now installed in FIB of R2") result = verify_rib( @@ -328,7 +332,9 @@ def test_recursive_routes_iBGP_peer_p1(request): next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0], protocol="bgp", ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("Delete 1 route from static recursive for the next-hop IP") dut = "r2" @@ -345,7 +351,9 @@ def test_recursive_routes_iBGP_peer_p1(request): } } result = create_static_routes(tgen, input_dict_3) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("Verify that redistributed routes are withdrawn from FIB of R2") result = verify_rib( @@ -355,7 +363,7 @@ def test_recursive_routes_iBGP_peer_p1(request): input_dict_4, next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0], protocol="bgp", - expected=False + expected=False, ) assert result is not True, "Testcase : Failed \n Error : {}".format( tc_name, result @@ -375,7 +383,9 @@ def test_recursive_routes_iBGP_peer_p1(request): } } result = create_static_routes(tgen, input_dict_3) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("Verify that redistributed routes are again installed" "in FIB of R2") result = verify_rib( @@ -386,7 +396,9 @@ def test_recursive_routes_iBGP_peer_p1(request): next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0], protocol="bgp", ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("Configure static route with changed next-hop from same subnet") for addr_type in ADDR_TYPES: 
@@ -410,7 +422,9 @@ def test_recursive_routes_iBGP_peer_p1(request): } } result = create_static_routes(tgen, input_dict_4) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) result = verify_rib(tgen, addr_type, "r1", input_dict_4, protocol="static") assert result is True, "Testcase {} : Failed \n Error : {}".format( @@ -455,7 +469,9 @@ def test_recursive_routes_iBGP_peer_p1(request): } } result = create_static_routes(tgen, input_dict_4) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) result = verify_rib(tgen, addr_type, "r1", input_dict_4, protocol="static") assert result is True, "Testcase {} : Failed \n Error : {}".format( @@ -578,7 +594,7 @@ def test_next_hop_as_self_ip_p1(request): "r2", input_dict_4, next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0], - expected=False + expected=False, ) assert result is not True, "Testcase : Failed \n Error : {}".format( tc_name, result @@ -614,7 +630,9 @@ def test_next_hop_as_self_ip_p1(request): input_dict_4, next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0], ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("No shutdown interface on R2 which was shut in previous step") intf_r2_r4 = topo["routers"]["r2"]["links"]["r4"]["interface"] @@ -644,14 +662,16 @@ def test_next_hop_as_self_ip_p1(request): input_dict_4, next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0], ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) result = verify_rib( tgen, addr_type, "r2", input_dict_4, next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0], - expected=False + expected=False, ) assert result is not True, "Testcase : Failed \n Error : {}".format( tc_name, result @@ -907,7 +927,9 @@ def test_next_hop_with_recursive_lookup_p1(request): result = verify_bgp_convergence_from_running_config(tgen, expected=False) assert ( result is not True - ), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(tc_name, result) + ), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format( + tc_name, result + ) logger.info("Expected behaviour: {}".format(result)) for addr_type in ADDR_TYPES: @@ -1018,7 +1040,7 @@ def test_next_hop_with_recursive_lookup_p1(request): input_dict, protocol="bgp", next_hop=next_hop, - expected=False + expected=False, ) assert result is not True, ( "Testcase {} : Failed \n " @@ -1083,7 +1105,9 @@ def test_next_hop_with_recursive_lookup_p1(request): result = verify_bgp_convergence_from_running_config(tgen, expected=False) assert ( result is not True - ), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(tc_name, result) + ), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format( + tc_name, result + ) logger.info("Expected behaviour: {}".format(result)) for addr_type in ADDR_TYPES: @@ -1099,7 +1123,7 @@ def test_next_hop_with_recursive_lookup_p1(request): input_dict, protocol="bgp", next_hop=next_hop, - expected=False + expected=False, ) assert result is not True, ( "Testcase {} : Failed \n " @@ -1138,7 +1162,9 @@ def 
test_next_hop_with_recursive_lookup_p1(request): result = verify_bgp_convergence_from_running_config(tgen, expected=False) assert ( result is not True - ), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(tc_name, result) + ), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format( + tc_name, result + ) logger.info("Expected behaviour: {}".format(result)) for addr_type in ADDR_TYPES: @@ -1154,7 +1180,7 @@ def test_next_hop_with_recursive_lookup_p1(request): input_dict, protocol="bgp", next_hop=next_hop, - expected=False + expected=False, ) assert result is not True, ( "Testcase {} : Failed \n " @@ -1237,7 +1263,9 @@ def test_BGP_path_attributes_default_values_p1(request): topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0], ], ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: input_dict_4 = { @@ -1256,7 +1284,9 @@ def test_BGP_path_attributes_default_values_p1(request): rmap_name="rmap_pf", input_dict=input_dict_4, ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step( "Configure a route-map to set below attribute value as 500" @@ -1358,7 +1388,9 @@ def test_BGP_path_attributes_default_values_p1(request): rmap_name="rmap_pf", input_dict=input_dict_4, ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step("Remove the route-map from R4") input_dict_5 = { @@ -1432,7 +1464,9 @@ def test_BGP_path_attributes_default_values_p1(request): input_dict=input_dict_4, nexthop=None, ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -1670,7 +1704,7 @@ def test_BGP_peering_bw_loopback_and_physical_p1(request): input_dict_1, protocol="static", next_hop=topo["routers"]["r1"]["links"]["r3"][addr_type].split("/")[0], - expected=False + expected=False, ) assert result is not True, "Testcase {} : Failed \n Error : {}".format( tc_name, result @@ -1801,7 +1835,9 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request): topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0], ], ) - assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result) + assert result is True, "Testcase : Failed \n Error : {}".format( + tc_name, result + ) step( "Configure a route-map to set as-path attribute and" @@ -2037,7 +2073,7 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request): topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0], topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0], ], - expected=False + expected=False, ) assert result is not True, "Testcase {} : Failed \n Error : {}".format( tc_name, result @@ -2084,7 +2120,7 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request): topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0], topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0], ], - expected=False + expected=False, ) assert result is not True, "Testcase {} : Failed \n Error : {}".format( tc_name, result diff --git a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py index 
4de7184c8e..71bd58bf73 100644 --- a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py +++ b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py @@ -149,22 +149,21 @@ def test_bgp_update_delay(): def _bgp_check_update_delay_in_progress(router): output = json.loads(router.vtysh_cmd("show ip bgp sum json")) - expected = {"ipv4Unicast": {"updateDelayInProgress":True}} + expected = {"ipv4Unicast": {"updateDelayInProgress": True}} return topotest.json_cmp(output, expected) def _bgp_check_route_install(router): output = json.loads(router.vtysh_cmd("show ip route 172.16.253.254/32 json")) - expected = {"172.16.253.254/32": [ {"protocol": "bgp"}]} + expected = {"172.16.253.254/32": [{"protocol": "bgp"}]} return topotest.json_cmp(output, expected) def _bgp_check_update_delay_and_wait(router): output = json.loads(router.vtysh_cmd("show ip bgp sum json")) expected = { - "ipv4Unicast": { - "updateDelayLimit": 20, - "updateDelayEstablishWait": 10}} + "ipv4Unicast": {"updateDelayLimit": 20, "updateDelayEstablishWait": 10} + } return topotest.json_cmp(output, expected) @@ -177,14 +176,11 @@ def test_bgp_update_delay(): def _bgp_check_vrf_update_delay_and_wait(router): output = json.loads(router.vtysh_cmd("show ip bgp vrf vrf1 sum json")) expected = { - "ipv4Unicast": { - "updateDelayLimit": 20, - "updateDelayEstablishWait": 10}} - + "ipv4Unicast": {"updateDelayLimit": 20, "updateDelayEstablishWait": 10} + } return topotest.json_cmp(output, expected) - # Check r2 initial convergence in default table test_func = functools.partial(_bgp_converge, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) @@ -198,7 +194,7 @@ def test_bgp_update_delay(): router bgp 65002 update-delay 20 """ - ) + ) # Shutdown peering on r1 toward r2 so that delay timers can be exercised router1.vtysh_cmd( @@ -207,7 +203,7 @@ def test_bgp_update_delay(): router bgp 65001 neighbor 192.168.255.1 shut """ - ) + ) # Clear bgp neighbors on r2 and then check for the 'in progress' indicator router2.vtysh_cmd("""clear ip bgp *""") @@ -215,13 +211,17 @@ def test_bgp_update_delay(): test_func = functools.partial(_bgp_check_update_delay_in_progress, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to set update-delay max-delay timer "{}"'.format(router2) + assert result is None, 'Failed to set update-delay max-delay timer "{}"'.format( + router2 + ) # Check that r2 only installs route learned from r4 after the max-delay timer expires test_func = functools.partial(_bgp_check_route_install, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to install route after update-delay "{}"'.format(router2) + assert result is None, 'Failed to install route after update-delay "{}"'.format( + router2 + ) # Define update-delay with max-delay and establish-wait and check json output showing set router2.vtysh_cmd( @@ -230,12 +230,14 @@ def test_bgp_update_delay(): router bgp 65002 update-delay 20 10 """ - ) + ) test_func = functools.partial(_bgp_check_update_delay_and_wait, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to set max-delay and establish-wait timers in "{}"'.format(router2) + assert ( + result is None + ), 'Failed to set max-delay and establish-wait timers in "{}"'.format(router2) # Define update-delay with max-delay and establish-wait and check json output showing set router2.vtysh_cmd("""clear
ip bgp *""") @@ -243,7 +245,11 @@ def test_bgp_update_delay(): test_func = functools.partial(_bgp_check_route_install, router3) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to install advertised route after establish-wait timer expired "{}"'.format(router2) + assert ( + result is None + ), 'Failed to install advertised route after establish-wait timer expired "{}"'.format( + router2 + ) # Remove update-delay timer on r2 to verify that it goes back to normal behavior router2.vtysh_cmd( @@ -252,7 +258,7 @@ def test_bgp_update_delay(): router bgp 65002 no update-delay """ - ) + ) # Clear neighbors on r2 and check that route install time on r2 does not delay router2.vtysh_cmd("""clear ip bgp *""") @@ -260,7 +266,9 @@ def test_bgp_update_delay(): test_func = functools.partial(_bgp_check_route_install, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to remove update-delay delay timing "{}"'.format(router2) + assert result is None, 'Failed to remove update-delay delay timing "{}"'.format( + router2 + ) # Define global bgp update-delay with max-delay and establish-wait on r2 router2.vtysh_cmd( @@ -268,13 +276,15 @@ def test_bgp_update_delay(): configure terminal bgp update-delay 20 10 """ - ) + ) # Check that r2 default instance and vrf1 have the max-delay and establish set test_func = functools.partial(_bgp_check_update_delay_and_wait, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to set update-delay in default instance "{}"'.format(router2) + assert result is None, 'Failed to set update-delay in default instance "{}"'.format( + router2 + ) test_func = functools.partial(_bgp_check_vrf_update_delay_and_wait, router2) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) @@ -287,7 +297,11 @@ def test_bgp_update_delay(): test_func = functools.partial(_bgp_check_route_install, router3) success, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assert result is None, 'Failed to install advertised route after establish-wait timer expired "{}"'.format(router2) + assert ( + result is None + ), 'Failed to install advertised route after establish-wait timer expired "{}"'.format( + router2 + ) if __name__ == "__main__": diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py index 1947548b3e..63db393178 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py @@ -40,8 +40,8 @@ import platform # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../lib/')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) # Required to instantiate the topology builder class.
@@ -52,20 +52,32 @@ from lib.topotest import version_cmp from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, check_address_types, - write_test_footer, reset_config_on_routers, - verify_rib, step, create_route_maps, - shutdown_bringup_interface, create_static_routes, - create_prefix_lists, create_bgp_community_lists, + start_topology, + write_test_header, + check_address_types, + write_test_footer, + reset_config_on_routers, + verify_rib, + step, + create_route_maps, + shutdown_bringup_interface, + create_static_routes, + create_prefix_lists, + create_bgp_community_lists, create_interface_in_kernel, - check_router_status, verify_cli_json, - get_frr_ipv6_linklocal, verify_fib_routes + check_router_status, + verify_cli_json, + get_frr_ipv6_linklocal, + verify_fib_routes, ) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - clear_bgp, verify_bgp_community, verify_bgp_rib + verify_bgp_convergence, + create_router_bgp, + clear_bgp, + verify_bgp_community, + verify_bgp_rib, ) from lib.topojson import build_topo_from_json, build_config_from_json @@ -99,10 +111,18 @@ NETWORK4_3 = {"ipv4": "50.50.50.5/32", "ipv6": "50:50::5/128"} NETWORK4_4 = {"ipv4": "50.50.50.50/32", "ipv6": "50:50::50/128"} NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"} -LOOPBACK_1 = {"ipv4": "10.0.0.7/24", "ipv6": "fd00:0:0:1::7/64", - "ipv4_mask": "255.255.255.0", "ipv6_mask": None} -LOOPBACK_2 = {"ipv4": "10.0.0.16/24", "ipv6": "fd00:0:0:3::5/64", - "ipv4_mask": "255.255.255.0", "ipv6_mask": None} +LOOPBACK_1 = { + "ipv4": "10.0.0.7/24", + "ipv6": "fd00:0:0:1::7/64", + "ipv4_mask": "255.255.255.0", + "ipv6_mask": None, +} +LOOPBACK_2 = { + "ipv4": "10.0.0.16/24", + "ipv6": "fd00:0:0:3::5/64", + "ipv4_mask": "255.255.255.0", + "ipv6_mask": None, +} PREFERRED_NEXT_HOP = "global" @@ -144,10 +164,11 @@ def setup_module(mod): start_topology(tgen) # Run these tests for kernel version 4.19 or above - if version_cmp(platform.release(), '4.19') < 0: - error_msg = ('BGP vrf dynamic route leak tests will not run ' - '(have kernel "{}", but it requires >= 4.19)'.\ - format(platform.release())) + if version_cmp(platform.release(), "4.19") < 0: + error_msg = ( + "BGP vrf dynamic route leak tests will not run " + '(have kernel "{}", but it requires >= 4.19)'.format(platform.release()) + ) pytest.skip(error_msg) # Creating configuration from JSON @@ -158,8 +179,9 @@ def setup_module(mod): ADDR_TYPES = check_address_types() BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}". \ - format(BGP_CONVERGENCE) + assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format( + BGP_CONVERGENCE + ) logger.info("Running setup_module() done") @@ -174,16 +196,19 @@ def teardown_module(): # Stop topology and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}".
- format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) + ##################################################### # # Local APIs # ##################################################### + def disable_route_map_to_prefer_global_next_hop(tgen, topo): """ This API is to remove prefer global route-map applied on neighbors @@ -202,8 +227,7 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): logger.info("Remove prefer-global rmap applied on neighbors") input_dict = { "r1": { - "bgp": - [ + "bgp": [ { "local_as": "100", "vrf": "ISR", @@ -214,18 +238,20 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r2": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } + }, }, { "local_as": "100", @@ -236,18 +262,20 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r3": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } + }, }, { "local_as": "100", @@ -258,24 +286,25 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r4": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } - } + }, + }, ] }, "r2": { - "bgp": - [ + "bgp": [ { "local_as": "100", "vrf": "ISR", @@ -286,18 +315,20 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r1": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } + }, }, { "local_as": "100", @@ -308,18 +339,20 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r3": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } + }, }, { "local_as": "100", @@ -330,24 +363,25 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r4": { "dest_link": { "r2-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } - } + }, + }, ] }, "r3": { - "bgp": - [ + "bgp": [ { "local_as": "300", "address_family": { @@ -357,18 +391,20 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r1": { "dest_link": { "r3-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } + }, }, { "local_as": "300", @@ -379,24 +415,25 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r2": { "dest_link": { "r3-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } - } + }, + }, ] }, "r4": { - "bgp": - 
[ + "bgp": [ { "local_as": "400", "address_family": { @@ -406,18 +443,20 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r1": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } + }, }, { "local_as": "400", @@ -428,26 +467,27 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): "r2": { "dest_link": { "r4-link1": { - "route_maps": [{ - "name": "rmap_global", - "direction": "in", - "delete": True - }] + "route_maps": [ + { + "name": "rmap_global", + "direction": "in", + "delete": True, + } + ] } } } } } } - } - } + }, + }, ] - } + }, } result = create_router_bgp(tgen, topo, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) return True @@ -458,6 +498,7 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo): # ##################################################### + def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request): """ TC5_FUNC_5: @@ -475,10 +516,11 @@ def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request): for addr_type in ADDR_TYPES: - step("Redistribute configured static routes into BGP process" - " on R1 and R3/R4") + step( + "Redistribute configured static routes into BGP process" " on R1 and R3/R4" + ) - input_dict_1={} + input_dict_1 = {} DUT = ["r1", "r3", "r4"] VRFS = ["default", "default", "default"] AS_NUM = [100, 300, 400] @@ -493,47 +535,48 @@ def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request): "vrf": vrf, "address_family": { addr_type: { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } + "unicast": {"redistribute": [{"redist_type": "static"}]} } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify that R1 receives BGP routes from R3 and R4 in " - "vrf default.") + step("Verify that R1 receives BGP routes from R3 and R4 in " "vrf default.") input_routes_r3 = { "r3": { - "static_routes": [{ - "network": [ - NETWORK3_1[addr_type], \ - NETWORK3_2[addr_type], \ - NETWORK3_3[addr_type], \ - NETWORK3_4[addr_type] - ] - }] + "static_routes": [ + { + "network": [ + NETWORK3_1[addr_type], + NETWORK3_2[addr_type], + NETWORK3_3[addr_type], + NETWORK3_4[addr_type], + ] + } + ] } } input_routes_r4 = { "r4": { - "static_routes": [{ - "network": [ - NETWORK4_1[addr_type], \ - NETWORK4_2[addr_type], \ - NETWORK4_3[addr_type], \ - NETWORK4_4[addr_type] - ] - }] + "static_routes": [ + { + "network": [ + NETWORK4_1[addr_type], + NETWORK4_2[addr_type], + NETWORK4_3[addr_type], + NETWORK4_4[addr_type], + ] + } + ] } } @@ -542,20 +585,20 @@ def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request): for dut, routes in zip(DUT, INPUT_DICT): result = verify_bgp_rib(tgen, addr_type, dut, routes) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) result = verify_fib_routes(tgen, addr_type, dut, routes) - assert result is True, \ - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: step("Import from default vrf into vrf ISR on R1") - input_dict_isr={} + input_dict_isr = {} DUT = ["r1", "r2"] VRFS = ["ISR", "ISR"] AS_NUM = [100, 100] @@ -569,50 +612,52 @@ def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "default" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "default"}}} + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify that default vrf's imported routes are installed " - "in RIB/FIB of vrf ISR on R1:") + step( + "Verify that default vrf's imported routes are installed " + "in RIB/FIB of vrf ISR on R1:" + ) input_routes_r3 = { "r3": { - "static_routes": [{ - "network": [ - NETWORK3_1[addr_type], \ - NETWORK3_2[addr_type], \ - NETWORK3_3[addr_type], \ - NETWORK3_4[addr_type] - ], - "vrf": "ISR" - }] + "static_routes": [ + { + "network": [ + NETWORK3_1[addr_type], + NETWORK3_2[addr_type], + NETWORK3_3[addr_type], + NETWORK3_4[addr_type], + ], + "vrf": "ISR", + } + ] } } input_routes_r4 = { "r4": { - "static_routes": [{ - "network": [ - NETWORK4_1[addr_type], \ - NETWORK4_2[addr_type], \ - NETWORK4_3[addr_type], \ - NETWORK4_4[addr_type] - ], - "vrf": "ISR" - }] + "static_routes": [ + { + "network": [ + NETWORK4_1[addr_type], + NETWORK4_2[addr_type], + NETWORK4_3[addr_type], + NETWORK4_4[addr_type], + ], + "vrf": "ISR", + } + ] } } @@ -620,87 +665,101 @@ def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request): for routes in INPUT_DICT_VRF: result = verify_bgp_rib(tgen, addr_type, "r1", routes) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) - result = verify_fib_routes(tgen, addr_type, "r1", routes) - assert result is True, \ - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result) + result = verify_fib_routes(tgen, addr_type, "r1", routes) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) intf_r2_r1 = topo["routers"]["r2"]["links"]["r1-link1"] for addr_type in ADDR_TYPES: - step("Create a loopback10 interface on R1 with below IP address and " - "associate with vrf ISR:") + step( + "Create a loopback10 interface on R1 with below IP address and " + "associate with vrf ISR:" + ) - create_interface_in_kernel(tgen, "r1", "loopback2", - LOOPBACK_2[addr_type], - "ISR", - LOOPBACK_2["{}_mask".\ - format(addr_type)]) + create_interface_in_kernel( + tgen, + "r1", + "loopback2", + LOOPBACK_2[addr_type], + "ISR", + LOOPBACK_2["{}_mask".format(addr_type)], + ) for addr_type in ADDR_TYPES: - step("On router R1 Change the next-hop of static routes in vrf " - "ISR to LOOPBACK_1") + step( + "On router R1 Change the next-hop of static routes in vrf " + "ISR to LOOPBACK_1" + ) - input_routes_r1= { + input_routes_r1 = { "r1": { - "static_routes":[ + "static_routes": [ { "network": [NETWORK1_3[addr_type], NETWORK1_4[addr_type]], - "next_hop":"Null0", - "delete": True + "next_hop": "Null0", + "delete": True, } ] } } result = create_static_routes(tgen, input_routes_r1) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) - input_routes_r1= { + input_routes_r1 = { "r1": { - "static_routes":[ + "static_routes": [ { "network": [NETWORK1_3[addr_type], NETWORK1_4[addr_type]], - "next_hop": (intf_r2_r1[addr_type]).split("/")[0] + "next_hop": (intf_r2_r1[addr_type]).split("/")[0], } ] } } result = create_static_routes(tgen, input_routes_r1) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify that, though R1 originating BGP routes with next-hop" + step( + "Verify that, though R1 originating BGP routes with next-hop" " 24.1.1.2/24::1:2, which is local to R2(but in default vrf)" - ", R2 must receives and install all routes from R1 in vrf ISR.") - step("Verify on R2, that it now rejects 10.10.10.x routes originated " - "from R1. As next-hop IP is local to R2's vrf ISR.") + ", R2 must receives and install all routes from R1 in vrf ISR." + ) + step( + "Verify on R2, that it now rejects 10.10.10.x routes originated " + "from R1. As next-hop IP is local to R2's vrf ISR." + ) - input_routes_r1= { + input_routes_r1 = { "r1": { - "static_routes":[ + "static_routes": [ { "network": [NETWORK1_3[addr_type], NETWORK1_4[addr_type]], - "vrf": "ISR" + "vrf": "ISR", } ] } } - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Routes are still present \n Error {}". 
\ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes are still present \n Error {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -722,71 +781,77 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): for addr_type in ADDR_TYPES: - step("Configure route-map to set community attribute for a specific" - "prefix on R1 in vrf ISR") + step( + "Configure route-map to set community attribute for a specific" + "prefix on R1 in vrf ISR" + ) input_dict_pf = { "r1": { "prefix_lists": { addr_type: { - "pflist_ABC_{}".format(addr_type): [{ - "seqid": 10, - "network": NETWORK1_1[addr_type], - "action": "permit" - }] + "pflist_ABC_{}".format(addr_type): [ + { + "seqid": 10, + "network": NETWORK1_1[addr_type], + "action": "permit", + } + ] } } } } result = create_prefix_lists(tgen, input_dict_pf) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_cl = { "r1": { "bgp_community_lists": [ - { - "community_type": "expanded", - "action": "permit", - "name": "COMM", - "value": "100:100" + { + "community_type": "expanded", + "action": "permit", + "name": "COMM", + "value": "100:100", } ] } } result = create_bgp_community_lists(tgen, input_dict_cl) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: input_dict_rm = { "r1": { "route_maps": { - "rmap_XYZ_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pflist_ABC_{}".format(addr_type) - } - }, - "set": { - "community": {"num": "100:100"} + "rmap_XYZ_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pflist_ABC_{}".format(addr_type) + } + }, + "set": {"community": {"num": "100:100"}}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Apply this route-map on R1 to vrf ISR while redistributing the" - " prefixes into BGP") + step( + "Apply this route-map on R1 to vrf ISR while redistributing the" + " prefixes into BGP" + ) - input_dict_1={} + input_dict_1 = {} DUT = ["r1"] VRFS = ["ISR"] AS_NUM = [100] @@ -802,53 +867,58 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): "address_family": { addr_type: { "unicast": { - "redistribute": [{ - "redist_type": "static", + "redistribute": [ + { + "redist_type": "static", "attribute": { - "route-map" : "rmap_XYZ_{}".\ - format(addr_type) - } + "route-map": "rmap_XYZ_{}".format(addr_type) + }, } ] } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Configure another route-map for filtering the prefixes based on" - " community attribute while importing into default vrf") + step( + "Configure another route-map for filtering the prefixes based on" + " community attribute while importing into default vrf" + ) input_dict_rm = { "r1": { "route_maps": { - "rmap_IMP_{}".format(addr_type): [{ - "action": "permit", - "match": { - "community_list": {"id": "COMM"} - }, - "set": { - "community": {"num": "none"} + "rmap_IMP_{}".format(addr_type): [ + { + "action": "permit", + "match": {"community_list": {"id": "COMM"}}, + "set": {"community": {"num": "none"}}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Apply the route-map while Importing vrf ISR's prefixes into " - "default vrf on router R1:") + step( + "Apply the route-map while Importing vrf ISR's prefixes into " + "default vrf on router R1:" + ) - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -862,15 +932,10 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) temp[dut]["bgp"].append( { @@ -884,50 +949,57 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify on R1 that only prefixes with community value 100:100" + step( + "Verify on R1 that only prefixes with community value 100:100" "in vrf ISR are imported to vrf default. While importing, the" - " community value has been stripped off:") + " community value has been stripped off:" + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". 
\
-            format(tc_name, result)
+        assert result is True, "Testcase {} : Failed \n Error {}".format(
+            tc_name, result
+        )
 
-        input_dict_comm = {
-            "community": "100:100"
-        }
+        input_dict_comm = {"community": "100:100"}
 
-        result = verify_bgp_community(tgen, addr_type, dut, [NETWORK1_1[addr_type]],
-                                      input_dict_comm, expected=False)
-        assert result is not True, (
-            "Testcase {} : Failed \n Error: Commnunity is not stipped off, {}".format(
-                tc_name, result))
+        result = verify_bgp_community(
+            tgen,
+            addr_type,
+            dut,
+            [NETWORK1_1[addr_type]],
+            input_dict_comm,
+            expected=False,
+        )
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n Error: Community is not stripped off, {}".format(
+            tc_name, result
+        )
 
     for addr_type in ADDR_TYPES:
 
         step("Remove/re-add route-map XYZ from redistribution.")
 
-        input_dict_1={}
+        input_dict_1 = {}
         DUT = ["r1"]
         VRFS = ["ISR"]
         AS_NUM = [100]
@@ -943,49 +1015,52 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request):
                 "address_family": {
                     addr_type: {
                         "unicast": {
-                            "redistribute": [{
-                                "redist_type": "static",
-                                "attribute": {
-                                    "route-map" : "rmap_XYZ_{}".\
-                                        format(addr_type)
-                                },
-                                "delete": True
-                            }]
+                            "redistribute": [
+                                {
+                                    "redist_type": "static",
+                                    "attribute": {
+                                        "route-map": "rmap_XYZ_{}".format(addr_type)
+                                    },
+                                    "delete": True,
+                                }
+                            ]
                         }
                     }
-                }
-            })
+                },
+            }
+        )
 
         result = create_router_bgp(tgen, topo, input_dict_1)
-        assert result is True, "Testcase {} :Failed \n Error: {}". \
-            format(tc_name, result)
+        assert result is True, "Testcase {} :Failed \n Error: {}".format(
+            tc_name, result
+        )
 
     for addr_type in ADDR_TYPES:
 
-        step("Verify that all the routes disappear from vrf default when "
+        step(
+            "Verify that all the routes disappear from vrf default when "
             "route-map is removed from redistribution, and appear again "
-            "when route-map is re-added to redistribution in vrf ISR.")
+            "when route-map is re-added to redistribution in vrf ISR."
+        )
 
         input_routes_r1 = {
             "r1": {
-                "static_routes": [{
-                    "network": [
-                        NETWORK1_1[addr_type]
-                    ],
-                    "vrf": "default"
-                }]
+                "static_routes": [
+                    {"network": [NETWORK1_1[addr_type]], "vrf": "default"}
+                ]
             }
         }
 
-        result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1,
-                                expected=False)
-        assert result is not True, (
-            "Testcase {} : Failed \n Error : Routes are still present \n {}".\
-                format(tc_name, result))
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False)
+        assert (
+            result is not True
+        ), "Testcase {} : Failed \n Error : Routes are still present \n {}".format(
+            tc_name, result
+        )
 
     for addr_type in ADDR_TYPES:
 
-        input_dict_1={}
+        input_dict_1 = {}
         DUT = ["r1"]
         VRFS = ["ISR"]
         AS_NUM = [100]
@@ -1001,45 +1076,45 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request):
                 "address_family": {
                     addr_type: {
                         "unicast": {
-                            "redistribute": [{
-                                "redist_type": "static",
-                                "attribute": {
-                                    "route-map" : "rmap_XYZ_{}".\
-                                        format(addr_type)
+                            "redistribute": [
+                                {
+                                    "redist_type": "static",
+                                    "attribute": {
+                                        "route-map": "rmap_XYZ_{}".format(addr_type)
+                                    },
                                 }
-                            }]
+                            ]
                         }
                     }
-                }
-            })
+                },
+            }
+        )
 
         result = create_router_bgp(tgen, topo, input_dict_1)
-        assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: step("Remove/re-add route-map IMP form import statement.") - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -1053,15 +1128,10 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) temp[dut]["bgp"].append( { @@ -1072,43 +1142,44 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): "unicast": { "import": { "vrf": "route-map rmap_IMP_{}".format(addr_type), - "delete": True + "delete": True, } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify that when route-map IMP is removed all the prefixes of" + step( + "Verify that when route-map IMP is removed all the prefixes of" " vrf ISR are imported to vrf default. However when route-map " "IMP is re-added only 11.11.11.1 and 11:11::1 (with community " - "value) are imported.") + "value) are imported." + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -1122,15 +1193,10 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) temp[dut]["bgp"].append( { @@ -1144,30 +1210,29 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: @@ -1177,165 +1242,178 @@ def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request): "r1": { "prefix_lists": { addr_type: { - "pflist_ABC_{}".format(addr_type): [{ - "seqid": 10, - "network": NETWORK1_1[addr_type], - "action": "permit", - "delete": True - }] + "pflist_ABC_{}".format(addr_type): [ + { + "seqid": 10, + "network": NETWORK1_1[addr_type], + "action": "permit", + "delete": True, + } + ] } } } } result = create_prefix_lists(tgen, input_dict_pf) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Error : Routes are still present \n {}".\ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Error : Routes are still present \n {}".format( + tc_name, result + ) - input_dict_pf["r1"]["prefix_lists"][addr_type]["pflist_ABC_{}".\ - format(addr_type)][0]["delete"]=False + input_dict_pf["r1"]["prefix_lists"][addr_type][ + "pflist_ABC_{}".format(addr_type) + ][0]["delete"] = False result = create_prefix_lists(tgen, input_dict_pf) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) step("Delete/Re-add community-list COMM.") input_dict_cl = { "r1": { "bgp_community_lists": [ - { - "community_type": "expanded", - "action": "permit", - "name": "COMM", - "value": "100:100", - "delete": True + { + "community_type": "expanded", + "action": "permit", + "name": "COMM", + "value": "100:100", + "delete": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_cl) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Error : Routes are still present \n {}".\ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Error : Routes are still present \n {}".format( + tc_name, result + ) - input_dict_cl["r1"]["bgp_community_lists"][0]["delete"]=False + input_dict_cl["r1"]["bgp_community_lists"][0]["delete"] = False result = create_bgp_community_lists(tgen, input_dict_cl) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) step("Delete/Re-add route-map XYZ.") input_dict_rm = { "r1": { "route_maps": { - "rmap_XYZ_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pflist_ABC_{}".format(addr_type) - } - }, - "set": { - "community": {"num": "100:100"} - }, - "delete": True - }] + "rmap_XYZ_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pflist_ABC_{}".format(addr_type) + } + }, + "set": {"community": {"num": "100:100"}}, + "delete": True, + } + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Error : Routes are still present \n {}".\ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Error : Routes are still present \n {}".format( + tc_name, result + ) - input_dict_rm["r1"]["route_maps"]["rmap_XYZ_{}".format(addr_type)][0]["delete"]=False + input_dict_rm["r1"]["route_maps"]["rmap_XYZ_{}".format(addr_type)][0][ + "delete" + ] = False result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) step("Delete/Re-add route-map IMP.") input_dict_rm2 = { "r1": { "route_maps": { - "rmap_IMP_{}".format(addr_type): [{ - "action": "permit", - "match": { - "community_list": {"id": "COMM"} - }, - "set": { - "community": {"num": "none"} - }, - "delete": True - }] + "rmap_IMP_{}".format(addr_type): [ + { + "action": "permit", + "match": {"community_list": {"id": "COMM"}}, + "set": {"community": {"num": "none"}}, + "delete": True, + } + ] } } } result = create_route_maps(tgen, input_dict_rm2) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Error : Routes are still present \n {}".\ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Error : Routes are still present \n {}".format( + tc_name, result + ) - input_dict_rm2["r1"]["route_maps"]["rmap_IMP_{}".format(addr_type)][0]["delete"]=False + input_dict_rm2["r1"]["route_maps"]["rmap_IMP_{}".format(addr_type)][0][ + "delete" + ] = False result = create_route_maps(tgen, input_dict_rm2) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -1356,71 +1434,77 @@ def test_routemap_operatons_with_dynamic_import_p0(request): for addr_type in ADDR_TYPES: - step("Configure route-map to set community attribute for a specific" - "prefix on R1 in vrf ISR") + step( + "Configure route-map to set community attribute for a specific" + "prefix on R1 in vrf ISR" + ) input_dict_pf = { "r1": { "prefix_lists": { addr_type: { - "pflist_ABC_{}".format(addr_type): [{ - "seqid": 10, - "network": NETWORK1_1[addr_type], - "action": "permit" - }] + "pflist_ABC_{}".format(addr_type): [ + { + "seqid": 10, + "network": NETWORK1_1[addr_type], + "action": "permit", + } + ] } } } } result = create_prefix_lists(tgen, input_dict_pf) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_cl = { "r1": { "bgp_community_lists": [ - { - "community_type": "expanded", - "action": "permit", - "name": "COMM", - "value": "100:100" + { + "community_type": "expanded", + "action": "permit", + "name": "COMM", + "value": "100:100", } ] } } result = create_bgp_community_lists(tgen, input_dict_cl) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: input_dict_rm = { "r1": { "route_maps": { - "rmap_XYZ_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pflist_ABC_{}".format(addr_type) - } - }, - "set": { - "community": {"num": "100:100"} + "rmap_XYZ_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": 
"pflist_ABC_{}".format(addr_type) + } + }, + "set": {"community": {"num": "100:100"}}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Apply this route-map on R1 to vrf ISR while redistributing the" - " prefixes into BGP") + step( + "Apply this route-map on R1 to vrf ISR while redistributing the" + " prefixes into BGP" + ) - input_dict_1={} + input_dict_1 = {} DUT = ["r1"] VRFS = ["ISR"] AS_NUM = [100] @@ -1436,53 +1520,58 @@ def test_routemap_operatons_with_dynamic_import_p0(request): "address_family": { addr_type: { "unicast": { - "redistribute": [{ - "redist_type": "static", + "redistribute": [ + { + "redist_type": "static", "attribute": { - "route-map" : "rmap_XYZ_{}".\ - format(addr_type) - } + "route-map": "rmap_XYZ_{}".format(addr_type) + }, } ] } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Configure another route-map for filtering the prefixes based on" - " community attribute while importing into default vrf") + step( + "Configure another route-map for filtering the prefixes based on" + " community attribute while importing into default vrf" + ) input_dict_rm = { "r1": { "route_maps": { - "rmap_IMP_{}".format(addr_type): [{ - "action": "permit", - "match": { - "community_list": {"id": "COMM"} - }, - "set": { - "community": {"num": "500:500"} + "rmap_IMP_{}".format(addr_type): [ + { + "action": "permit", + "match": {"community_list": {"id": "COMM"}}, + "set": {"community": {"num": "500:500"}}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Apply the route-map while Importing vrf ISR's prefixes into " - "default vrf on router R1:") + step( + "Apply the route-map while Importing vrf ISR's prefixes into " + "default vrf on router R1:" + ) - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -1496,15 +1585,10 @@ def test_routemap_operatons_with_dynamic_import_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) temp[dut]["bgp"].append( { @@ -1518,42 +1602,45 @@ def test_routemap_operatons_with_dynamic_import_p0(request): } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify on R1 that only prefixes with community value 100:100" + step( + "Verify on R1 that only prefixes with community value 100:100" "in vrf ISR are imported to vrf default. 
While importing, the" - " community value has been stripped off:") + " community value has been stripped off:" + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: step("Applying route-map first followed by import VRF command.") - step("Apply the route-map while Importing vrf ISR's prefixes into " - "default vrf on router R1:") + step( + "Apply the route-map while Importing vrf ISR's prefixes into " + "default vrf on router R1:" + ) - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -1568,15 +1655,11 @@ def test_routemap_operatons_with_dynamic_import_p0(request): "vrf": vrf, "address_family": { addr_type: { - "unicast": { - "import": { - "vrf": "ISR", - "delete": True - } - } + "unicast": {"import": {"vrf": "ISR", "delete": True}} } - } - }) + }, + } + ) temp[dut]["bgp"].append( { @@ -1590,39 +1673,41 @@ def test_routemap_operatons_with_dynamic_import_p0(request): } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify that until 'import VRF command' is not configured, " + step( + "Verify that until 'import VRF command' is not configured, " "routes are not imported. After configuring 'import VRF command'" - " repeat step-4 for verification") + " repeat step-4 for verification" + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Error : Routes are still present \n {}".\ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Error : Routes are still present \n {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -1636,15 +1721,10 @@ def test_routemap_operatons_with_dynamic_import_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) temp[dut]["bgp"].append( { @@ -1658,37 +1738,35 @@ def test_routemap_operatons_with_dynamic_import_p0(request): } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Delete/re-add import vrf ISR command multiple times in default" - "vrf.") + step("Delete/re-add import vrf ISR command multiple times in default" "vrf.") - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -1703,112 +1781,111 @@ def test_routemap_operatons_with_dynamic_import_p0(request): "vrf": vrf, "address_family": { addr_type: { - "unicast": { - "import": { - "vrf": "ISR", - "delete": True - } - } + "unicast": {"import": {"vrf": "ISR", "delete": True}} } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - step("Verify that when import vrf ISR command is deleted, " - "all routes of vrf ISR disappear from default vrf and " - "when it's re-configured, repeat step-4 for verification.") + step( + "Verify that when import vrf ISR command is deleted, " + "all routes of vrf ISR disappear from default vrf and " + "when it's re-configured, repeat step-4 for verification." + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Routes are still present, Error {}". \ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes are still present, Error {}".format( + tc_name, result + ) input_dict_isr["r1"]["bgp"][0]["address_family"][addr_type]["unicast"][ - "import"]["delete"]=False + "import" + ]["delete"] = False result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, ( - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result)) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Delete and re-configure route-map IMP from global config when " - "import and route-maps are applied in a ISR vrf.") + step( + "Delete and re-configure route-map IMP from global config when " + "import and route-maps are applied in a ISR vrf." 
+ ) input_dict_rm = { "r1": { "route_maps": { - "rmap_IMP_{}".format(addr_type): [{ - "action": "permit", - "match": { - "community_list": {"id": "COMM"} - }, - "set": { - "community": {"num": "500:500"} - }, - "delete": True - }] + "rmap_IMP_{}".format(addr_type): [ + { + "action": "permit", + "match": {"community_list": {"id": "COMM"}}, + "set": {"community": {"num": "500:500"}}, + "delete": True, + } + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Routes are still present, Error {}". \ - format(tc_name, result)) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Routes are still present, Error {}".format( + tc_name, result + ) - input_dict_rm["r1"]["route_maps"]["rmap_IMP_{}".\ - format(addr_type)][0]["delete"]=False + input_dict_rm["r1"]["route_maps"]["rmap_IMP_{}".format(addr_type)][0][ + "delete" + ] = False result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - input_dict_comm = { - "community": "500:500" - } + input_dict_comm = {"community": "500:500"} - result = verify_bgp_community(tgen, addr_type, dut, [NETWORK1_1[addr_type]], - input_dict_comm) - assert result is True, ( - "Testcase {} : Failed \n Error: {}".format( - tc_name, result)) + result = verify_bgp_community( + tgen, addr_type, dut, [NETWORK1_1[addr_type]], input_dict_comm + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) write_test_footer(tc_name) @@ -1828,21 +1905,21 @@ def test_verify_cli_json_p1(request): check_router_status(tgen) input_dict = { - "r1":{ - "cli": ["show bgp vrf default ipv4 summary", - "show bgp vrf all ipv6 summary", - "show bgp neighbors" + "r1": { + "cli": [ + "show bgp vrf default ipv4 summary", + "show bgp vrf all ipv6 summary", + "show bgp neighbors", ] } } result = verify_cli_json(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py index 6c106060b8..9106c163cd 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py @@ -38,8 +38,8 @@ import platform # Save the Current Working Directory to find configuration files. 
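The quote churn in the hunk above ('../' becoming "../", and the same pattern throughout the patch) comes from black's string normalization, which rewrites single-quoted literals to double quotes. A minimal sketch of exercising that behavior, assuming black is installed on PATH (the target path is simply the file from this diff):

    import subprocess

    # Default run: black normalizes 'single' quotes to "double" quotes,
    # which accounts for much of the churn in this patch.
    target = "tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py"
    subprocess.run(["black", target], check=True)

    # With -S / --skip-string-normalization, quote style is left alone;
    # --check only reports whether the file would be reformatted.
    subprocess.run(["black", "-S", "--check", target], check=True)
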
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) -sys.path.append(os.path.join(CWD, '../lib/')) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) # Required to instantiate the topology builder class. @@ -50,22 +50,31 @@ from lib.topotest import version_cmp from mininet.topo import Topo from lib.common_config import ( - start_topology, write_test_header, check_address_types, + start_topology, + write_test_header, + check_address_types, write_test_footer, - verify_rib, step, create_route_maps, - create_static_routes, stop_router, start_router, + verify_rib, + step, + create_route_maps, + create_static_routes, + stop_router, + start_router, create_prefix_lists, create_bgp_community_lists, check_router_status, get_frr_ipv6_linklocal, - shutdown_bringup_interface + shutdown_bringup_interface, ) from lib.topolog import logger from lib.bgp import ( - verify_bgp_convergence, create_router_bgp, - verify_bgp_community, verify_bgp_attributes, - verify_best_path_as_per_bgp_attribute, verify_bgp_rib + verify_bgp_convergence, + create_router_bgp, + verify_bgp_community, + verify_bgp_attributes, + verify_best_path_as_per_bgp_attribute, + verify_bgp_rib, ) from lib.topojson import build_topo_from_json, build_config_from_json @@ -123,10 +132,11 @@ def setup_module(mod): start_topology(tgen) # Run these tests for kernel version 4.19 or above - if version_cmp(platform.release(), '4.19') < 0: - error_msg = ('BGP vrf dynamic route leak tests will not run ' - '(have kernel "{}", but it requires >= 4.19)'.\ - format(platform.release())) + if version_cmp(platform.release(), "4.19") < 0: + error_msg = ( + "BGP vrf dynamic route leak tests will not run " + '(have kernel "{}", but it requires >= 4.19)'.format(platform.release()) + ) pytest.skip(error_msg) # Creating configuration from JSON @@ -137,8 +147,9 @@ def setup_module(mod): ADDR_TYPES = check_address_types() BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) - assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}". \ - format(BGP_CONVERGENCE) + assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format( + BGP_CONVERGENCE + ) logger.info("Running setup_module() done") @@ -153,8 +164,9 @@ def teardown_module(): # Stop toplogy and Remove tmp files tgen.stop_topology() - logger.info("Testsuite end time: {}". - format(time.asctime(time.localtime(time.time())))) + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) logger.info("=" * 40) @@ -164,6 +176,7 @@ def teardown_module(): # ##################################################### + def test_bgp_best_path_with_dynamic_import_p0(request): """ TC6_FUNC_6: @@ -181,10 +194,11 @@ def test_bgp_best_path_with_dynamic_import_p0(request): for addr_type in ADDR_TYPES: - step("Redistribute configured static routes into BGP process" - " on R1/R2 and R3") + step( + "Redistribute configured static routes into BGP process" " on R1/R2 and R3" + ) - input_dict_1={} + input_dict_1 = {} DUT = ["r1", "r2", "r3", "r4"] VRFS = ["ISR", "ISR", "default", "default"] AS_NUM = [100, 100, 300, 400] @@ -199,24 +213,22 @@ def test_bgp_best_path_with_dynamic_import_p0(request): "vrf": vrf, "address_family": { addr_type: { - "unicast": { - "redistribute": [{ - "redist_type": "static" - }] - } + "unicast": {"redistribute": [{"redist_type": "static"}]} } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: step("Import from default vrf into vrf ISR on R1 and R2 as below") - input_dict_vrf={} + input_dict_vrf = {} DUT = ["r1", "r2"] VRFS = ["ISR", "ISR"] AS_NUM = [100, 100] @@ -230,21 +242,17 @@ def test_bgp_best_path_with_dynamic_import_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "default" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "default"}}} + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_vrf) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - input_dict_default={} + input_dict_default = {} DUT = ["r1", "r2"] VRFS = ["default", "default"] AS_NUM = [100, 100] @@ -258,36 +266,28 @@ def test_bgp_best_path_with_dynamic_import_p0(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_default) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - step("Verify ECMP/Next-hop/Imported routes Vs Locally originated " - "routes/eBGP routes vs iBGP routes --already covered in almost" - " all tests") + step( + "Verify ECMP/Next-hop/Imported routes Vs Locally originated " + "routes/eBGP routes vs iBGP routes --already covered in almost" + " all tests" + ) for addr_type in ADDR_TYPES: step("Verify Pre-emption") input_routes_r3 = { - "r3": { - "static_routes": [{ - "network": [ - NETWORK3_3[addr_type] - ] - }] - } + "r3": {"static_routes": [{"network": [NETWORK3_3[addr_type]]}]} } intf_r3_r1 = topo["routers"]["r3"]["links"]["r1-link1"]["interface"] @@ -297,30 +297,27 @@ def test_bgp_best_path_with_dynamic_import_p0(request): nh_r3_r1 = get_frr_ipv6_linklocal(tgen, "r3", intf=intf_r3_r1) nh_r4_r1 = get_frr_ipv6_linklocal(tgen, "r4", intf=intf_r4_r1) else: - nh_r3_r1 = topo["routers"]["r3"]["links"]\ - ["r1-link1"][addr_type].split("/")[0] - nh_r4_r1 = topo["routers"]["r4"]["links"]\ - ["r1-link1"][addr_type].split("/")[0] + nh_r3_r1 = topo["routers"]["r3"]["links"]["r1-link1"][addr_type].split("/")[ + 0 + ] + nh_r4_r1 = topo["routers"]["r4"]["links"]["r1-link1"][addr_type].split("/")[ + 0 + ] - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r3, - next_hop=[nh_r4_r1]) - assert result is True, ( - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result)) + result = verify_bgp_rib( + tgen, addr_type, "r1", input_routes_r3, next_hop=[nh_r4_r1] + ) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) step("Shutdown interface connected to r1 from r4:") - shutdown_bringup_interface(tgen, 'r4', intf_r4_r1, False) + shutdown_bringup_interface(tgen, "r4", intf_r4_r1, False) for addr_type in ADDR_TYPES: input_routes_r3 = { - "r3": { - "static_routes": [{ - "network": [ - NETWORK3_3[addr_type] - ] - }] - } + "r3": {"static_routes": [{"network": [NETWORK3_3[addr_type]]}]} } intf_r3_r1 = topo["routers"]["r3"]["links"]["r1-link1"]["interface"] @@ -330,31 +327,28 @@ def test_bgp_best_path_with_dynamic_import_p0(request): nh_r3_r1 = get_frr_ipv6_linklocal(tgen, "r3", intf=intf_r3_r1) nh_r4_r1 = get_frr_ipv6_linklocal(tgen, "r4", intf=intf_r4_r1) else: - nh_r3_r1 = topo["routers"]["r3"]["links"]\ - ["r1-link1"][addr_type].split("/")[0] - nh_r4_r1 = topo["routers"]["r4"]["links"]\ - ["r1-link1"][addr_type].split("/")[0] + nh_r3_r1 = topo["routers"]["r3"]["links"]["r1-link1"][addr_type].split("/")[ + 0 + ] + nh_r4_r1 = topo["routers"]["r4"]["links"]["r1-link1"][addr_type].split("/")[ + 0 + ] step("Verify next-hop is changed") - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r3, - next_hop=[nh_r3_r1]) - assert result is True, ( - "Testcase {} : Failed \n Error {}". \ - format(tc_name, result)) + result = verify_bgp_rib( + tgen, addr_type, "r1", input_routes_r3, next_hop=[nh_r3_r1] + ) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) step("Bringup interface connected to r1 from r4:") - shutdown_bringup_interface(tgen, 'r4', intf_r4_r1, True) + shutdown_bringup_interface(tgen, "r4", intf_r4_r1, True) for addr_type in ADDR_TYPES: input_routes_r3 = { - "r3": { - "static_routes": [{ - "network": [ - NETWORK3_3[addr_type] - ] - }] - } + "r3": {"static_routes": [{"network": [NETWORK3_3[addr_type]]}]} } intf_r3_r1 = topo["routers"]["r3"]["links"]["r1-link1"]["interface"] @@ -364,17 +358,20 @@ def test_bgp_best_path_with_dynamic_import_p0(request): nh_r3_r1 = get_frr_ipv6_linklocal(tgen, "r3", intf=intf_r3_r1) nh_r4_r1 = get_frr_ipv6_linklocal(tgen, "r4", intf=intf_r4_r1) else: - nh_r3_r1 = topo["routers"]["r3"]["links"]\ - ["r1-link1"][addr_type].split("/")[0] - nh_r4_r1 = topo["routers"]["r4"]["links"]\ - ["r1-link1"][addr_type].split("/")[0] + nh_r3_r1 = topo["routers"]["r3"]["links"]["r1-link1"][addr_type].split("/")[ + 0 + ] + nh_r4_r1 = topo["routers"]["r4"]["links"]["r1-link1"][addr_type].split("/")[ + 0 + ] step("Verify next-hop is not chnaged aftr shutdown:") - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r3, - next_hop=[nh_r3_r1]) - assert result is True, ( - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result)) + result = verify_bgp_rib( + tgen, addr_type, "r1", input_routes_r3, next_hop=[nh_r3_r1] + ) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) step("Active-Standby scenario(as-path prepend and Local pref)") @@ -386,18 +383,21 @@ def test_bgp_best_path_with_dynamic_import_p0(request): "r1": { "prefix_lists": { addr_type: { - "pf_ls_{}".format(addr_type): [{ - "seqid": 10, - "network": NETWORK3_4[addr_type], - "action": "permit" - }] + "pf_ls_{}".format(addr_type): [ + { + "seqid": 10, + "network": NETWORK3_4[addr_type], + "action": "permit", + } + ] } } } } result = create_prefix_lists(tgen, input_dict_pf) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: @@ -406,57 +406,56 @@ def test_bgp_best_path_with_dynamic_import_p0(request): input_dict_rm = { "r1": { "route_maps": { - "rmap_PATH1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": 10, - "match": { - addr_type: { - "prefix_lists": - "pf_ls_{}".format(addr_type) - } - }, - "set": { - "locPrf": 500 + "rmap_PATH1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": 10, + "match": { + addr_type: { + "prefix_lists": "pf_ls_{}".format(addr_type) + } + }, + "set": {"locPrf": 500}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) step("Create route-map to match prefix-list and set localpref 600") input_dict_rm = { "r1": { "route_maps": { - "rmap_PATH2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": 20, - "match": { - addr_type: { - "prefix_lists": - "pf_ls_{}".format(addr_type) - } - }, - "set": { - "locPrf": 600 + "rmap_PATH2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": 20, + "match": { + addr_type: { + "prefix_lists": "pf_ls_{}".format(addr_type) + } + }, + "set": {"locPrf": 600}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - input_dict_rma={ + input_dict_rma = { "r1": { - "bgp": - [ + "bgp": [ { "local_as": "100", "address_family": { @@ -466,36 +465,44 @@ def test_bgp_best_path_with_dynamic_import_p0(request): "r3": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "rmap_PATH1_{}".\ - format(addr_type), - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_PATH1_{}".format( + addr_type + ), + "direction": "in", + } + ] } } }, "r4": { "dest_link": { "r1-link1": { - "route_maps": [{ - "name": "rmap_PATH2_{}".\ - format(addr_type), - "direction": "in" - }] + "route_maps": [ + { + "name": "rmap_PATH2_{}".format( + addr_type + ), + "direction": "in", + } + ] } } - } + }, } } } - } + }, } - ]} + ] } + } result = create_router_bgp(tgen, topo, input_dict_rma) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) dut = "r1" attribute = "locPrf" @@ -506,20 +513,18 @@ def test_bgp_best_path_with_dynamic_import_p0(request): input_routes_r3 = { "r3": { - "static_routes": [{ - "network": [ - NETWORK3_3[addr_type], \ - NETWORK3_4[addr_type] - ] - }] + "static_routes": [ + {"network": [NETWORK3_3[addr_type], NETWORK3_4[addr_type]]} + ] } } - result = verify_best_path_as_per_bgp_attribute(tgen, 
addr_type, dut, - input_routes_r3, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_routes_r3, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: @@ -528,26 +533,26 @@ def test_bgp_best_path_with_dynamic_import_p0(request): input_dict_rm = { "r1": { "route_maps": { - "rmap_PATH1_{}".format(addr_type): [{ - "action": "permit", - "seq_id": 10, - "match": { - addr_type: { - "prefix_lists": - "pf_ls_{}".format(addr_type) - } - }, - "set": { - "locPrf": 700 + "rmap_PATH1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": 10, + "match": { + addr_type: { + "prefix_lists": "pf_ls_{}".format(addr_type) + } + }, + "set": {"locPrf": 700}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: @@ -555,20 +560,18 @@ def test_bgp_best_path_with_dynamic_import_p0(request): input_routes_r3 = { "r3": { - "static_routes": [{ - "network": [ - NETWORK3_3[addr_type], \ - NETWORK3_4[addr_type] - ] - }] + "static_routes": [ + {"network": [NETWORK3_3[addr_type], NETWORK3_4[addr_type]]} + ] } } - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - input_routes_r3, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_routes_r3, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) for addr_type in ADDR_TYPES: @@ -577,30 +580,29 @@ def test_bgp_best_path_with_dynamic_import_p0(request): input_dict_rm = { "r1": { "route_maps": { - "rmap_PATH2_{}".format(addr_type): [{ - "action": "permit", - "seq_id": 20, - "match": { - addr_type: { - "prefix_lists": - "pf_ls_{}".format(addr_type) - } - }, - "set": { - "localpref": 700, - "path": { - "as_num": "111", - "as_action": "prepend" - } + "rmap_PATH2_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": 20, + "match": { + addr_type: { + "prefix_lists": "pf_ls_{}".format(addr_type) + } + }, + "set": { + "localpref": 700, + "path": {"as_num": "111", "as_action": "prepend"}, + }, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) attribute = "path" @@ -610,20 +612,18 @@ def test_bgp_best_path_with_dynamic_import_p0(request): input_routes_r3 = { "r3": { - "static_routes": [{ - "network": [ - NETWORK3_3[addr_type], \ - NETWORK3_4[addr_type] - ] - }] + "static_routes": [ + {"network": [NETWORK3_3[addr_type], NETWORK3_4[addr_type]]} + ] } } - result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut, - input_routes_r3, - attribute) + result = verify_best_path_as_per_bgp_attribute( + tgen, addr_type, dut, input_routes_r3, attribute + ) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -645,71 +645,77 @@ def test_modify_route_map_match_set_clauses_p1(request): for addr_type in ADDR_TYPES: - step("Configure route-map to set community attribute for a specific" - "prefix on R1 in vrf ISR") + step( + "Configure route-map to set community attribute for a specific" + "prefix on R1 in vrf ISR" + ) 
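A side effect of black's reflow, visible in the step() call just above: adjacent string literals concatenate with no separator, so a missing trailing space at a line break renders as "...for a specificprefix on R1...". A small illustration (the strings are the ones from this test; the variable names are only for the demo):

    # Implicit concatenation joins adjacent literals verbatim, so the
    # missing space at the join point glues the two words together.
    msg = (
        "Configure route-map to set community attribute for a specific"
        "prefix on R1 in vrf ISR"
    )
    assert "specificprefix" in msg

    # An explicit trailing space at the break keeps the text readable.
    msg_fixed = (
        "Configure route-map to set community attribute for a specific "
        "prefix on R1 in vrf ISR"
    )
    assert "specific prefix" in msg_fixed
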
input_dict_pf = { "r1": { "prefix_lists": { addr_type: { - "pflist_ABC_{}".format(addr_type): [{ - "seqid": 10, - "network": NETWORK1_1[addr_type], - "action": "permit" - }] + "pflist_ABC_{}".format(addr_type): [ + { + "seqid": 10, + "network": NETWORK1_1[addr_type], + "action": "permit", + } + ] } } } } result = create_prefix_lists(tgen, input_dict_pf) assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) input_dict_cl = { "r1": { "bgp_community_lists": [ - { - "community_type": "expanded", - "action": "permit", - "name": "COMM", - "value": "100:100" + { + "community_type": "expanded", + "action": "permit", + "name": "COMM", + "value": "100:100", } ] } } result = create_bgp_community_lists(tgen, input_dict_cl) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: input_dict_rm = { "r1": { "route_maps": { - "rmap_XYZ_{}".format(addr_type): [{ - "action": "permit", - "match": { - addr_type: { - "prefix_lists": - "pflist_ABC_{}".format(addr_type) - } - }, - "set": { - "community": {"num": "100:100"} + "rmap_XYZ_{}".format(addr_type): [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pflist_ABC_{}".format(addr_type) + } + }, + "set": {"community": {"num": "100:100"}}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Apply this route-map on R1 to vrf ISR while redistributing the" - " prefixes into BGP") + step( + "Apply this route-map on R1 to vrf ISR while redistributing the" + " prefixes into BGP" + ) - input_dict_1={} + input_dict_1 = {} DUT = ["r1"] VRFS = ["ISR"] AS_NUM = [100] @@ -725,54 +731,59 @@ def test_modify_route_map_match_set_clauses_p1(request): "address_family": { addr_type: { "unicast": { - "redistribute": [{ - "redist_type": "static", + "redistribute": [ + { + "redist_type": "static", "attribute": { - "route-map" : "rmap_XYZ_{}".\ - format(addr_type) - } + "route-map": "rmap_XYZ_{}".format(addr_type) + }, } ] } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Configure another route-map for filtering the prefixes based on" - " community attribute while importing into default vrf") + step( + "Configure another route-map for filtering the prefixes based on" + " community attribute while importing into default vrf" + ) input_dict_rm = { "r1": { "route_maps": { - "rmap_IMP_{}".format(addr_type): [{ - "action": "permit", - "seq_id": 10, - "match": { - "community_list": {"id": "COMM"} - }, - "set": { - "community": {"num": "none"} + "rmap_IMP_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": 10, + "match": {"community_list": {"id": "COMM"}}, + "set": {"community": {"num": "none"}}, } - }] + ] } } } result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Apply the route-map while Importing vrf ISR's prefixes into " - "default vrf on router R1:") + step( + "Apply the route-map while Importing vrf ISR's prefixes into " + "default vrf on router R1:" + ) - input_dict_isr={} + input_dict_isr = {} DUT = ["r1"] VRFS = ["default"] AS_NUM = [100] @@ -786,15 +797,10 @@ def test_modify_route_map_match_set_clauses_p1(request): "local_as": as_num, "vrf": vrf, "address_family": { - addr_type: { - "unicast": { - "import": { - "vrf": "ISR" - } - } - } - } - }) + addr_type: {"unicast": {"import": {"vrf": "ISR"}}} + }, + } + ) temp[dut]["bgp"].append( { @@ -808,155 +814,143 @@ def test_modify_route_map_match_set_clauses_p1(request): } } } - } - }) + }, + } + ) result = create_router_bgp(tgen, topo, input_dict_isr) - assert result is True, "Testcase {} : Failed \n Error: {}". \ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: - step("Verify on R1 that only prefixes with community value 100:100" + step( + "Verify on R1 that only prefixes with community value 100:100" "in vrf ISR are imported to vrf default. While importing, the" - " community value has been stripped off:") + " community value has been stripped off:" + ) input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1) - assert result is True, \ - "Testcase {} : Failed \n Error {}". 
\ - format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) for addr_type in ADDR_TYPES: step("Add set clause in route-map IMP:") input_dict_rm = { - "r1": { - "route_maps": { - "rmap_IMP_{}".format(addr_type): [{ - "action": "permit", - "seq_id": 10, - "match": { - "community_list": {"id": "COMM"} - }, - "set": { - "large_community": {"num": "100:100:100"}, - "locPrf": 500, - "path": { - "as_num": "100 100", - "as_action": "prepend" - } - } - }] - } - } - } - result = create_route_maps(tgen, input_dict_rm) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) - - for addr_type in ADDR_TYPES: - - step("Verify that as we continue adding different attributes " - "step-by-step in route-map IMP those attributes gets " - "attached to prefixes:") - - input_routes_r1 = { - "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] - } - } - - input_dict_comm = { - "largeCommunity": "100:100:100" - } - - result = verify_bgp_community(tgen, addr_type, dut, [NETWORK1_1[addr_type]], - input_dict_comm) - assert result is True, ( - "Testcase {} : Failed \n Error {}".format( - tc_name, result)) - - input_rmap = { "r1": { "route_maps": { "rmap_IMP_{}".format(addr_type): [ { + "action": "permit", + "seq_id": 10, + "match": {"community_list": {"id": "COMM"}}, "set": { - "locPrf": 500 - } + "large_community": {"num": "100:100:100"}, + "locPrf": 500, + "path": {"as_num": "100 100", "as_action": "prepend"}, + }, } ] } } } + result = create_route_maps(tgen, input_dict_rm) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) - result = verify_bgp_attributes(tgen, addr_type, "r1",\ - [NETWORK1_1[addr_type]], - rmap_name="rmap_IMP_{}".format(addr_type),\ - input_dict=input_rmap) - assert result is True, "Testcase : Failed \n Error: {}".format( - tc_name, result) + for addr_type in ADDR_TYPES: - step("Change community-list to match a different value then " - "100:100.") + step( + "Verify that as we continue adding different attributes " + "step-by-step in route-map IMP those attributes gets " + "attached to prefixes:" + ) + + input_routes_r1 = { + "r1": { + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] + } + } + + input_dict_comm = {"largeCommunity": "100:100:100"} + + result = verify_bgp_community( + tgen, addr_type, dut, [NETWORK1_1[addr_type]], input_dict_comm + ) + assert result is True, "Testcase {} : Failed \n Error {}".format( + tc_name, result + ) + + input_rmap = { + "r1": { + "route_maps": { + "rmap_IMP_{}".format(addr_type): [{"set": {"locPrf": 500}}] + } + } + } + + result = verify_bgp_attributes( + tgen, + addr_type, + "r1", + [NETWORK1_1[addr_type]], + rmap_name="rmap_IMP_{}".format(addr_type), + input_dict=input_rmap, + ) + assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result) + + step("Change community-list to match a different value then " "100:100.") input_dict_cl = { "r1": { "bgp_community_lists": [ - { - "community_type": "expanded", - "action": "permit", - "name": "COMM", - "value": "100:100", - "delete": True + { + "community_type": "expanded", + "action": "permit", + "name": "COMM", + "value": "100:100", + "delete": True, } ] } } result = create_bgp_community_lists(tgen, input_dict_cl) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for 
addr_type in ADDR_TYPES: input_routes_r1 = { "r1": { - "static_routes": [{ - "network": [ - NETWORK1_1[addr_type] - ], - "vrf": "default" - }] + "static_routes": [ + {"network": [NETWORK1_1[addr_type]], "vrf": "default"} + ] } } - result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, - expected=False) + result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) assert result is not True, ( "Testcase {} : Failed \n Error : Routes are still " - "present {}".\ - format(tc_name, result)) + "present {}".format(tc_name, result) + ) write_test_footer(tc_name) -if __name__ == '__main__': +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py index e913105e43..46e21857c8 100644 --- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py +++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py @@ -71,7 +71,7 @@ from lib.common_config import ( configure_brctl, apply_raw_config, verify_vrf_vni, - verify_cli_json + verify_cli_json, ) from lib.topolog import logger @@ -81,7 +81,7 @@ from lib.bgp import ( clear_bgp, verify_best_path_as_per_bgp_attribute, verify_attributes_for_evpn_routes, - verify_evpn_routes + verify_evpn_routes, ) from lib.topojson import build_topo_from_json, build_config_from_json @@ -177,9 +177,11 @@ def setup_module(mod): # Creating configuration from JSON build_config_from_json(tgen, topo) - if version_cmp(platform.release(), '4.19') < 0: - error_msg = ('EVPN tests will not run (have kernel "{}", ' - 'but it requires >= 4.19)'.format(platform.release())) + if version_cmp(platform.release(), "4.19") < 0: + error_msg = ( + 'EVPN tests will not run (have kernel "{}", ' + "but it requires >= 4.19)".format(platform.release()) + ) pytest.skip(error_msg) global BGP_CONVERGENCE @@ -389,9 +391,9 @@ def test_verify_overlay_index_p1(request): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -463,7 +465,7 @@ def test_evpn_cli_json_available_p1(request): "cli": [ "show evpn vni detail", "show bgp l2vpn evpn all overlay", - "show bgp l2vpn evpn vni" + "show bgp l2vpn evpn vni", ] } } @@ -516,9 +518,9 @@ def test_RT_verification_auto_p0(request): "network": NETWORK4_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py index c1eb7d68bb..87f391ae49 100644 --- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py +++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py @@ -77,7 +77,7 @@ from lib.common_config import ( configure_vxlan, configure_brctl, verify_vrf_vni, - create_interface_in_kernel + create_interface_in_kernel, ) from lib.topolog import logger @@ -87,7 +87,7 @@ from lib.bgp import ( clear_bgp, verify_best_path_as_per_bgp_attribute, verify_attributes_for_evpn_routes, - verify_evpn_routes + verify_evpn_routes, ) from lib.topojson import build_topo_from_json, build_config_from_json @@ -179,9 +179,11 @@ def setup_module(mod): # Creating configuration from JSON build_config_from_json(tgen, topo) - if version_cmp(platform.release(), '4.19') < 0: - error_msg = ('EVPN tests will not run (have 
kernel "{}", ' - 'but it requires >= 4.19)'.format(platform.release())) + if version_cmp(platform.release(), "4.19") < 0: + error_msg = ( + 'EVPN tests will not run (have kernel "{}", ' + "but it requires >= 4.19)".format(platform.release()) + ) pytest.skip(error_msg) global BGP_CONVERGENCE @@ -387,9 +389,9 @@ def test_RD_verification_manual_and_auto_p0(request): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -453,7 +455,7 @@ def test_RD_verification_manual_and_auto_p0(request): "vrf": "RED", "address_family": { "l2vpn": {"evpn": {"rd": "100.100.100.100:100"}} - } + }, } ] } @@ -620,9 +622,9 @@ def test_RT_verification_manual_p0(request): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -652,7 +654,7 @@ def test_RT_verification_manual_p0(request): "l2vpn": { "evpn": {"route-target": {"export": [{"value": "100:100"}]}} }, - } + }, } ] } @@ -995,9 +997,9 @@ def test_active_standby_evpn_implementation_p1(request): "network": NETWORK1_4[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -1249,9 +1251,9 @@ def test_evpn_routes_from_VNFs_p1(request): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -1382,9 +1384,9 @@ def test_evpn_routes_from_VNFs_p1(request): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -1617,9 +1619,9 @@ def test_route_map_operations_for_evpn_address_family_p1(request, attribute): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) @@ -1811,9 +1813,9 @@ def test_bgp_attributes_for_evpn_address_family_p1(request, attribute): "network": NETWORK3_1[addr_type], "next_hop": NEXT_HOP_IP[addr_type], "vrf": "GREEN", - } + }, ] - } + }, } result = create_static_routes(tgen, input_dict_1) diff --git a/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py b/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py index d4ebe52bf6..34eb6d90f6 100644 --- a/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py +++ b/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py @@ -73,7 +73,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -84,8 +84,10 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -93,44 +95,45 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1") - switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1") - switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1") + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1") - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-1") - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-1") + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1") - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-2") - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-2") + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2") - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-1") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-1") + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1") - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-2") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-2") + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2") - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4") + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") - switch = tgen.add_switch('s7') - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6") - switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4") + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4") + + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5") - switch = tgen.add_switch('s8') - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6") - switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5") def setup_module(mod): "Sets up the pytest environment" @@ -142,16 +145,15 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_ISIS, - os.path.join(CWD, '{}/isisd.conf'.format(rname)) + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -159,22 +161,23 @@ def teardown_module(mod): # This function tears down the 
whole topology. tgen.stop_topology() + def router_compare_json_output(rname, command, reference): "Compare router JSON output" logger.info('Comparing router "%s" "%s" output', rname, command) tgen = get_topogen() - filename = '{}/{}/{}'.format(CWD, rname, reference) + filename = "{}/{}/{}".format(CWD, rname, reference) expected = json.loads(open(filename).read()) # Run test function until we get an result. Wait at most 60 seconds. - test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + # # Step 1 # @@ -188,9 +191,13 @@ def test_isis_adjacencies_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step1/show_yang_interface_isis_adjacencies.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step1/show_yang_interface_isis_adjacencies.ref", + ) + def test_rib_ipv4_step1(): logger.info("Test (step 1): verify IPv4 RIB") @@ -200,9 +207,11 @@ def test_rib_ipv4_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step1/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step1/show_ip_route.ref" + ) + def test_rib_ipv6_step1(): logger.info("Test (step 1): verify IPv6 RIB") @@ -212,9 +221,11 @@ def test_rib_ipv6_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step1/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref" + ) + def test_mpls_lib_step1(): logger.info("Test (step 1): verify MPLS LIB") @@ -224,9 +235,11 @@ def test_mpls_lib_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step1/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step1/show_mpls_table.ref" + ) + # # Step 2 @@ -252,13 +265,21 @@ def test_isis_adjacencies_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Disabling IS-IS on the eth-rt5 interface on rt4') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no ip router isis 1"') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no ipv6 router isis 1"') + logger.info("Disabling IS-IS on the eth-rt5 interface on rt4") + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt5" -c "no ip router isis 1"' + ) + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt5" -c "no ipv6 router isis 1"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang 
operational-data /frr-interface:lib isisd", + "step2/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step2/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step2(): logger.info("Test (step 2): verify IPv4 RIB") @@ -268,9 +289,11 @@ def test_rib_ipv4_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step2/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step2/show_ip_route.ref" + ) + def test_rib_ipv6_step2(): logger.info("Test (step 2): verify IPv6 RIB") @@ -280,9 +303,11 @@ def test_rib_ipv6_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step2/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step2/show_ipv6_route.ref" + ) + def test_mpls_lib_step2(): logger.info("Test (step 2): verify MPLS LIB") @@ -292,9 +317,11 @@ def test_mpls_lib_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step2/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step2/show_mpls_table.ref" + ) + # # Step 3 @@ -318,14 +345,18 @@ def test_isis_adjacencies_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Shutting down the eth-rt4 interface on rt6') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "shutdown"') - logger.info('Shutting down the eth-rt5 interface on rt6') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "shutdown"') + logger.info("Shutting down the eth-rt4 interface on rt6") + tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "shutdown"') + logger.info("Shutting down the eth-rt5 interface on rt6") + tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "shutdown"') + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step3/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step3/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step3(): logger.info("Test (step 3): verify IPv4 RIB") @@ -335,9 +366,11 @@ def test_rib_ipv4_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step3/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step3/show_ip_route.ref" + ) + def test_rib_ipv6_step3(): logger.info("Test (step 3): verify IPv6 RIB") @@ -347,9 +380,11 @@ def test_rib_ipv6_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 
'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step3/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step3/show_ipv6_route.ref" + ) + def test_mpls_lib_step3(): logger.info("Test (step 3): verify MPLS LIB") @@ -359,9 +394,11 @@ def test_mpls_lib_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step3/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step3/show_mpls_table.ref" + ) + # # Step 4 @@ -386,16 +423,22 @@ def test_isis_adjacencies_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Bringing up the eth-rt4 interface on rt6') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "no shutdown"') - logger.info('Bringing up the eth-rt5 interface on rt6') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no shutdown"') - logger.info('Changing rt6\'s SRGB') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 18000 25999"') + logger.info("Bringing up the eth-rt4 interface on rt6") + tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "no shutdown"') + logger.info("Bringing up the eth-rt5 interface on rt6") + tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no shutdown"') + logger.info("Changing rt6's SRGB") + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 18000 25999"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step4/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step4/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step4(): logger.info("Test (step 4): verify IPv4 RIB") @@ -405,9 +448,11 @@ def test_rib_ipv4_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step4/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step4/show_ip_route.ref" + ) + def test_rib_ipv6_step4(): logger.info("Test (step 4): verify IPv6 RIB") @@ -417,9 +462,11 @@ def test_rib_ipv6_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step4/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step4/show_ipv6_route.ref" + ) + def test_mpls_lib_step4(): logger.info("Test (step 4): verify MPLS LIB") @@ -429,9 +476,11 @@ def test_mpls_lib_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step4/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + 
router_compare_json_output( + rname, "show mpls table json", "step4/show_mpls_table.ref" + ) + # # Step 5 @@ -453,12 +502,18 @@ def test_isis_adjacencies_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Disabling SR on rt6') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing on"') + logger.info("Disabling SR on rt6") + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing on"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step5/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step5/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step5(): logger.info("Test (step 5): verify IPv4 RIB") @@ -468,9 +523,11 @@ def test_rib_ipv4_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step5/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step5/show_ip_route.ref" + ) + def test_rib_ipv6_step5(): logger.info("Test (step 5): verify IPv6 RIB") @@ -480,9 +537,11 @@ def test_rib_ipv6_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step5/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step5/show_ipv6_route.ref" + ) + def test_mpls_lib_step5(): logger.info("Test (step 5): verify MPLS LIB") @@ -492,9 +551,11 @@ def test_mpls_lib_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step5/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step5/show_mpls_table.ref" + ) + # # Step 6 @@ -516,12 +577,16 @@ def test_isis_adjacencies_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Enabling SR on rt6') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing on"') + logger.info("Enabling SR on rt6") + tgen.net["rt6"].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing on"') + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step6/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step6/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step6(): logger.info("Test (step 6): verify IPv4 RIB") @@ -531,9 +596,11 @@ def test_rib_ipv4_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step6/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + 
router_compare_json_output( + rname, "show ip route isis json", "step6/show_ip_route.ref" + ) + def test_rib_ipv6_step6(): logger.info("Test (step 6): verify IPv6 RIB") @@ -543,9 +610,11 @@ def test_rib_ipv6_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step6/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step6/show_ipv6_route.ref" + ) + def test_mpls_lib_step6(): logger.info("Test (step 6): verify MPLS LIB") @@ -555,9 +624,11 @@ def test_mpls_lib_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step6/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step6/show_mpls_table.ref" + ) + # # Step 7 @@ -576,13 +647,21 @@ def test_isis_adjacencies_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Deleting rt1\'s Prefix-SIDs') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 1.1.1.1/32 index 10"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 2001:db8:1000::1/128 index 11"') + logger.info("Deleting rt1's Prefix-SIDs") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 1.1.1.1/32 index 10"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 2001:db8:1000::1/128 index 11"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step7/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step7/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step7(): logger.info("Test (step 7): verify IPv4 RIB") @@ -592,9 +671,11 @@ def test_rib_ipv4_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step7/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step7/show_ip_route.ref" + ) + def test_rib_ipv6_step7(): logger.info("Test (step 7): verify IPv6 RIB") @@ -604,9 +685,11 @@ def test_rib_ipv6_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step7/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step7/show_ipv6_route.ref" + ) + def test_mpls_lib_step7(): logger.info("Test (step 7): verify MPLS LIB") @@ -616,9 +699,11 @@ def test_mpls_lib_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step7/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: 
+ router_compare_json_output( + rname, "show mpls table json", "step7/show_mpls_table.ref" + ) + # # Step 8 @@ -637,13 +722,21 @@ def test_isis_adjacencies_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Re-adding rt1\'s Prefix-SIDs') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"') + logger.info("Re-adding rt1's Prefix-SIDs") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step8/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step8/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step8(): logger.info("Test (step 8): verify IPv4 RIB") @@ -653,9 +746,11 @@ def test_rib_ipv4_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step8/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step8/show_ip_route.ref" + ) + def test_rib_ipv6_step8(): logger.info("Test (step 8): verify IPv6 RIB") @@ -665,9 +760,11 @@ def test_rib_ipv6_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step8/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step8/show_ipv6_route.ref" + ) + def test_mpls_lib_step8(): logger.info("Test (step 8): verify MPLS LIB") @@ -677,9 +774,11 @@ def test_mpls_lib_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step8/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step8/show_mpls_table.ref" + ) + # # Step 9 @@ -700,16 +799,28 @@ def test_isis_adjacencies_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Changing rt1\'s Prefix-SIDs to use the no-php option') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10 no-php-flag"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11 no-php-flag"') - logger.info('Change rt6\'s Prefix-SIDs to stop using the explicit-null option') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60"') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61"') + logger.info("Changing rt1's Prefix-SIDs to use the no-php option") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 
1" -c "segment-routing prefix 1.1.1.1/32 index 10 no-php-flag"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11 no-php-flag"' + ) + logger.info("Change rt6's Prefix-SIDs to stop using the explicit-null option") + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60"' + ) + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step9/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step9/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step9(): logger.info("Test (step 9): verify IPv4 RIB") @@ -719,9 +830,11 @@ def test_rib_ipv4_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step9/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step9/show_ip_route.ref" + ) + def test_rib_ipv6_step9(): logger.info("Test (step 9): verify IPv6 RIB") @@ -731,9 +844,11 @@ def test_rib_ipv6_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step9/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step9/show_ipv6_route.ref" + ) + def test_mpls_lib_step9(): logger.info("Test (step 9): verify MPLS LIB") @@ -743,9 +858,11 @@ def test_mpls_lib_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step9/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step9/show_mpls_table.ref" + ) + # # Step 10 @@ -766,12 +883,18 @@ def test_isis_adjacencies_step10(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Removing the IPv4 address from rt4\'s eth-rt2-1 interface') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt2-1" -c "no ip address 10.0.2.4/24"') + logger.info("Removing the IPv4 address from rt4's eth-rt2-1 interface") + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2-1" -c "no ip address 10.0.2.4/24"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step10/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step10/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step10(): logger.info("Test (step 10): verify IPv4 RIB") @@ -781,9 +904,11 @@ def test_rib_ipv4_step10(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - 
router_compare_json_output(rname, "show ip route isis json", - "step10/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step10/show_ip_route.ref" + ) + def test_rib_ipv6_step10(): logger.info("Test (step 10): verify IPv6 RIB") @@ -793,9 +918,11 @@ def test_rib_ipv6_step10(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step10/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step10/show_ipv6_route.ref" + ) + def test_mpls_lib_step10(): logger.info("Test (step 10): verify MPLS LIB") @@ -805,9 +932,11 @@ def test_mpls_lib_step10(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step10/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step10/show_mpls_table.ref" + ) + # # Step 11 @@ -826,13 +955,26 @@ def test_isis_invalid_config_step11(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Entering invalid Segment Routing configuration...') - ret = tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10000"') - assert re.search("Configuration failed", ret) is not None, "Invalid SR configuration wasn't rejected" - ret = tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 14999"') - assert re.search("Configuration failed", ret) is not None, "Invalid SR configuration wasn't rejected" - ret = tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 16001"') - assert re.search("Configuration failed", ret) is not None, "Invalid SR configuration wasn't rejected" + logger.info("Entering invalid Segment Routing configuration...") + ret = tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10000"' + ) + assert ( + re.search("Configuration failed", ret) is not None + ), "Invalid SR configuration wasn't rejected" + ret = tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 14999"' + ) + assert ( + re.search("Configuration failed", ret) is not None + ), "Invalid SR configuration wasn't rejected" + ret = tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 16001"' + ) + assert ( + re.search("Configuration failed", ret) is not None + ), "Invalid SR configuration wasn't rejected" + # # Step 12 @@ -851,19 +993,39 @@ def test_isis_adjacencies_step12(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Restoring the original network setup') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "ip router isis 1"') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "ipv6 router isis 1"') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 23999"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 
11"') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60 explicit-null"') - tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61 explicit-null"') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt2-1" -c "ip address 10.0.2.4/24"') + logger.info("Restoring the original network setup") + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt5" -c "ip router isis 1"' + ) + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt5" -c "ipv6 router isis 1"' + ) + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 23999"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"' + ) + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60 explicit-null"' + ) + tgen.net["rt6"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61 explicit-null"' + ) + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2-1" -c "ip address 10.0.2.4/24"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step1/show_yang_interface_isis_adjacencies.ref", + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - "step1/show_yang_interface_isis_adjacencies.ref") def test_rib_ipv4_step12(): logger.info("Test (step 12): verify IPv4 RIB") @@ -873,9 +1035,11 @@ def test_rib_ipv4_step12(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - "step1/show_ip_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "step1/show_ip_route.ref" + ) + def test_rib_ipv6_step12(): logger.info("Test (step 12): verify IPv6 RIB") @@ -885,9 +1049,11 @@ def test_rib_ipv6_step12(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - "step1/show_ipv6_route.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref" + ) + def test_mpls_lib_step12(): logger.info("Test (step 12): verify MPLS LIB") @@ -897,19 +1063,22 @@ def test_mpls_lib_step12(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - "step1/show_mpls_table.ref") + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", "step1/show_mpls_table.ref" + ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py b/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py index 7943b94189..12121e4ddf 100644 --- a/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py +++ b/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py @@ -82,6 +82,7 @@ class ISISTopo1(Topo): sw.add_link(tgen.gears["r4"]) sw.add_link(tgen.gears["r5"]) + def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(ISISTopo1, mod.__name__) @@ -129,16 +130,14 @@ def setup_module(mod): for rname, router in tgen.routers().items(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, "{}/zebra.conf".format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_ISIS, - os.path.join(CWD, "{}/isisd.conf".format(rname)) + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) ) # After loading the configurations, this function loads configured daemons. tgen.start_router() - + has_version_20 = False for router in tgen.routers().values(): if router.has_version("<", "4"): @@ -148,6 +147,7 @@ def setup_module(mod): logger.info("Skipping ISIS vrf tests for FRR 2.0") tgen.set_error("ISIS has convergence problems with IPv6") + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -155,6 +155,7 @@ def teardown_module(mod): # delete rx-vrf tgen.stop_topology() + def test_isis_convergence(): "Wait for the protocol to converge before starting to test" tgen = get_topogen() @@ -163,10 +164,11 @@ def test_isis_convergence(): pytest.skip(tgen.errors) logger.info("waiting for ISIS protocol to converge") - + for rname, router in tgen.routers().items(): filename = "{0}/{1}/{1}_topology.json".format(CWD, rname) expected = json.loads(open(filename).read()) + def compare_isis_topology(router, expected): "Helper function to test ISIS vrf topology convergence." 
actual = show_isis_topology(router) @@ -177,6 +179,7 @@ def test_isis_convergence(): (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=120) assert result, "ISIS did not converge on {}:\n{}".format(rname, diff) + def test_isis_route_installation(): "Check whether all expected routes are present" tgen = get_topogen() @@ -189,7 +192,9 @@ def test_isis_route_installation(): for rname, router in tgen.routers().items(): filename = "{0}/{1}/{1}_route.json".format(CWD, rname) expected = json.loads(open(filename, "r").read()) - actual = router.vtysh_cmd("show ip route vrf {0}-cust1 json".format(rname) , isjson=True) + actual = router.vtysh_cmd( + "show ip route vrf {0}-cust1 json".format(rname), isjson=True + ) # Older FRR versions don't list interfaces in some ISIS routes if router.has_version("<", "3.1"): for network, routes in expected.items(): @@ -209,7 +214,7 @@ def test_isis_linux_route_installation(): dist = platform.dist() - if (dist[1] == "16.04"): + if dist[1] == "16.04": pytest.skip("Kernel not supported for vrf") "Check whether all expected routes are present and installed in the OS" @@ -234,6 +239,7 @@ def test_isis_linux_route_installation(): assertmsg = "Router '{}' OS routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg + def test_isis_route6_installation(): "Check whether all expected routes are present" tgen = get_topogen() @@ -246,7 +252,9 @@ def test_isis_route6_installation(): for rname, router in tgen.routers().items(): filename = "{0}/{1}/{1}_route6.json".format(CWD, rname) expected = json.loads(open(filename, "r").read()) - actual = router.vtysh_cmd("show ipv6 route vrf {}-cust1 json".format(rname) , isjson=True) + actual = router.vtysh_cmd( + "show ipv6 route vrf {}-cust1 json".format(rname), isjson=True + ) # Older FRR versions don't list interfaces in some ISIS routes if router.has_version("<", "3.1"): @@ -262,11 +270,12 @@ def test_isis_route6_installation(): assertmsg = "Router '{}' routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg + def test_isis_linux_route6_installation(): dist = platform.dist() - if (dist[1] == "16.04"): + if dist[1] == "16.04": pytest.skip("Kernel not supported for vrf") "Check whether all expected routes are present and installed in the OS" @@ -291,6 +300,7 @@ def test_isis_linux_route6_installation(): assertmsg = "Router '{}' OS routes mismatch".format(rname) assert topotest.json_cmp(actual, expected) is None, assertmsg + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() @@ -452,4 +462,3 @@ def show_isis_topology(router): dict_merge(l1, l2) return l1 - diff --git a/tests/topotests/ldp-sync-isis-topo1/test_ldp_sync_isis_topo1.py b/tests/topotests/ldp-sync-isis-topo1/test_ldp_sync_isis_topo1.py index d339b7bd7b..331e6fafd4 100644 --- a/tests/topotests/ldp-sync-isis-topo1/test_ldp_sync_isis_topo1.py +++ b/tests/topotests/ldp-sync-isis-topo1/test_ldp_sync_isis_topo1.py @@ -182,7 +182,9 @@ def test_isis_convergence(): router_compare_json_output( rname, "show yang operational-data /frr-interface:lib isisd", - "show_yang_interface_isis_adjacencies.ref") + "show_yang_interface_isis_adjacencies.ref", + ) + def test_rib(): logger.info("Test: verify RIB") @@ -265,6 +267,7 @@ def test_ldp_pseudowires(): rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref" ) + def test_ldp_igp_sync(): logger.info("Test: verify LDP igp-sync") tgen = get_topogen() @@ -278,6 +281,7 @@ def test_ldp_igp_sync(): rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref" ) + def test_isis_ldp_sync(): logger.info("Test: verify ISIS igp-sync") tgen = get_topogen() @@ -287,9 +291,7 @@ def test_isis_ldp_sync(): pytest.skip(tgen.errors) for rname in ["r1", "r2", "r3"]: - (result, diff) = validate_show_isis_ldp_sync( - rname, "show_isis_ldp_sync.ref" - ) + (result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref") assert result, "ISIS did not converge on {}:\n{}".format(rname, diff) for rname in ["r1", "r2", "r3"]: @@ -320,7 +322,9 @@ def test_r1_eth1_shutdown(): for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref" + rname, + "show mpls ldp igp-sync json", + "show_ldp_igp_sync_r1_eth1_shutdown.ref", ) for rname in ["r1", "r2", "r3"]: @@ -355,9 +359,7 @@ def test_r1_eth1_no_shutdown(): ) for rname in ["r1", "r2", "r3"]: - (result, diff) = validate_show_isis_ldp_sync( - rname, "show_isis_ldp_sync.ref" - ) + (result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref") assert result, "ISIS did not converge on {}:\n{}".format(rname, diff) for rname in ["r1", "r2", "r3"]: @@ -382,7 +384,9 @@ def test_r2_eth1_shutdown(): for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref" + rname, + "show mpls ldp igp-sync json", + "show_ldp_igp_sync_r1_eth1_shutdown.ref", ) for rname in ["r1", "r2", "r3"]: @@ -417,9 +421,7 @@ def test_r2_eth1_no_shutdown(): ) for rname in ["r1", "r2", "r3"]: - (result, diff) = validate_show_isis_ldp_sync( - rname, "show_isis_ldp_sync.ref" - ) + (result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref") assert result, "ISIS did not converge on {}:\n{}".format(rname, diff) for rname in ["r1", "r2", "r3"]: @@ -448,6 +450,7 @@ if __name__ == "__main__": # Auxiliary functions # + def parse_show_isis_ldp_sync(lines, rname): """ Parse the output of 'show isis mpls ldp sync' into a Python dict. 
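
The hunk below reformats parse_show_isis_ldp_sync(), which walks vtysh output with it.next() — a Python 2-only iterator method that black leaves in place (the reformat only drops the stray trailing semicolons). A minimal sketch of the same walk using the version-neutral built-in next() follows; the field prefixes are taken from the parser in this patch, while the sample values are hypothetical:

# Sketch only, not part of the patch: portable variant of the parsing
# loop reformatted in the next hunk. next(it) works on Python 2 and 3;
# it.next() exists only on Python 2 iterators.
lines = [
    "r1-eth0",  # sample values below are hypothetical
    " LDP-IGP Synchronization enabled: yes",
    " holddown timer in seconds: 50",
    " State: Sync achieved",
]
it = iter(lines)
interface = {}

line = next(it)
if line.startswith("r1-eth"):
    interface_name = line

line = next(it)
if line.startswith(" LDP-IGP Synchronization enabled: "):
    interface["ldpIgpSyncEnabled"] = line.endswith("yes")

line = next(it)
if line.startswith(" holddown timer in seconds: "):
    interface["holdDownTimeInSec"] = int(line.split(": ")[-1])

line = next(it)
if line.startswith(" State: "):
    interface["ldpIgpSyncState"] = line.split(": ")[-1]

# interface => {"ldpIgpSyncEnabled": True, "holdDownTimeInSec": 50,
#               "ldpIgpSyncState": "Sync achieved"}
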
@@ -461,23 +464,23 @@ def parse_show_isis_ldp_sync(lines, rname): interface = {} interface_name = None - line = it.next(); + line = it.next() if line.startswith(rname + "-eth"): interface_name = line - line = it.next(); + line = it.next() if line.startswith(" LDP-IGP Synchronization enabled: "): interface["ldpIgpSyncEnabled"] = line.endswith("yes") - line = it.next(); + line = it.next() if line.startswith(" holddown timer in seconds: "): - interface["holdDownTimeInSec"] = int(line.split(": ")[-1]) - line = it.next(); + interface["holdDownTimeInSec"] = int(line.split(": ")[-1]) + line = it.next() if line.startswith(" State: "): - interface["ldpIgpSyncState"] = line.split(": ")[-1] + interface["ldpIgpSyncState"] = line.split(": ")[-1] elif line.startswith(" Interface "): interface["Interface"] = line.endswith("down") @@ -534,7 +537,7 @@ def parse_show_isis_interface_detail(lines, rname): while True: try: - line = it.next(); + line = it.next() area_match = re.match(r"Area (.+):", line) if not area_match: @@ -543,34 +546,36 @@ def parse_show_isis_interface_detail(lines, rname): area_id = area_match.group(1) area = {} - line = it.next(); + line = it.next() while line.startswith(" Interface: "): - interface_name = re.split(':|,', line)[1].lstrip() + interface_name = re.split(":|,", line)[1].lstrip() - area[interface_name]= [] + area[interface_name] = [] # Look for keyword: Level-1 or Level-2 while not line.startswith(" Level-"): - line = it.next(); + line = it.next() while line.startswith(" Level-"): level = {} level_name = line.split()[0] - level['level'] = level_name + level["level"] = level_name - line = it.next(); + line = it.next() if line.startswith(" Metric:"): - level['metric'] = re.split(':|,', line)[1].lstrip() + level["metric"] = re.split(":|,", line)[1].lstrip() area[interface_name].append(level) # Look for keyword: Level-1 or Level-2 or Interface: - while not line.startswith(" Level-") and not line.startswith(" Interface: "): - line = it.next(); + while not line.startswith(" Level-") and not line.startswith( + " Interface: " + ): + line = it.next() if line.startswith(" Level-"): continue @@ -623,4 +628,3 @@ def validate_show_isis_interface_detail(rname, fname): (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=160) return (result, diff) - diff --git a/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py b/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py index 9694fa982f..20d7f46d4c 100644 --- a/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py +++ b/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py @@ -264,6 +264,7 @@ def test_ldp_pseudowires(): rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref" ) + def test_ldp_igp_sync(): logger.info("Test: verify LDP igp-sync") tgen = get_topogen() @@ -277,6 +278,7 @@ def test_ldp_igp_sync(): rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref" ) + def test_ospf_ldp_sync(): logger.info("Test: verify OSPF igp-sync") tgen = get_topogen() @@ -317,19 +319,26 @@ def test_r1_eth1_shutdown(): for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref" + rname, + "show mpls ldp igp-sync json", + "show_ldp_igp_sync_r1_eth1_shutdown.ref", ) for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show ip ospf mpls ldp-sync json", "show_ospf_ldp_sync_r1_eth1_shutdown.ref" + rname, + "show ip ospf mpls ldp-sync json", + "show_ospf_ldp_sync_r1_eth1_shutdown.ref", ) for rname 
in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show ip ospf interface json", "show_ip_ospf_interface_r1_eth1_shutdown.ref" + rname, + "show ip ospf interface json", + "show_ip_ospf_interface_r1_eth1_shutdown.ref", ) + def test_r1_eth1_no_shutdown(): logger.info("Test: verify behaviour after r1-eth1 is no shutdown") tgen = get_topogen() @@ -358,6 +367,7 @@ def test_r1_eth1_no_shutdown(): rname, "show ip ospf interface json", "show_ip_ospf_interface.ref" ) + def test_r2_eth1_shutdown(): logger.info("Test: verify behaviour after r2-eth1 is shutdown") tgen = get_topogen() @@ -373,19 +383,26 @@ def test_r2_eth1_shutdown(): for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref" + rname, + "show mpls ldp igp-sync json", + "show_ldp_igp_sync_r1_eth1_shutdown.ref", ) for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show ip ospf mpls ldp-sync json", "show_ospf_ldp_sync_r2_eth1_shutdown.ref" + rname, + "show ip ospf mpls ldp-sync json", + "show_ospf_ldp_sync_r2_eth1_shutdown.ref", ) for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show ip ospf interface json", "show_ip_ospf_interface_r2_eth1_shutdown.ref" + rname, + "show ip ospf interface json", + "show_ip_ospf_interface_r2_eth1_shutdown.ref", ) + def test_r2_eth1_no_shutdown(): logger.info("Test: verify behaviour after r2-eth1 is no shutdown") tgen = get_topogen() @@ -414,6 +431,7 @@ def test_r2_eth1_no_shutdown(): rname, "show ip ospf interface json", "show_ip_ospf_interface.ref" ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." diff --git a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py index 0b8bf4de0e..ba94cd47d4 100644 --- a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py +++ b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py @@ -283,8 +283,7 @@ def test_ldp_pseudowires_after_link_down(): # for nexthop resolution). Give some extra wait time. for rname in ["r1", "r2", "r3"]: router_compare_json_output( - rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref", - count=160, wait=1 + rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref", count=160, wait=1 ) diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py index 8629ebd2ca..a23092de83 100644 --- a/tests/topotests/lib/bgprib.py +++ b/tests/topotests/lib/bgprib.py @@ -40,9 +40,8 @@ import re # gpz: get rib in json form and compare against desired routes class BgpRib: - def log(self, str): - LUtil.log ("BgpRib: "+ str) + LUtil.log("BgpRib: " + str) def routes_include_wanted(self, pfxtbl, want, debug): # helper function to RequireVpnRoutes @@ -156,7 +155,7 @@ class BgpRib: errstr = "-script ERROR: check if vrf missing" luResult(target, False, title + errstr, logstr) return - #if debug: + # if debug: # self.log("table=%s" % table) for want in wantroutes: if debug: diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 1fa6d35101..d83f946c42 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -400,6 +400,7 @@ def check_router_status(tgen): logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return True + def getStrIO(): """ Return a StringIO object appropriate for the current python version. 
@@ -409,6 +410,7 @@ else: return StringIO.StringIO() + def reset_config_on_routers(tgen, routerName=None): """ Resets configuration on routers to the snapshot created using input JSON @@ -702,14 +704,14 @@ def start_topology(tgen, daemon=None): ) TMPDIR = os.path.join(LOGDIR, tgen.modname) - linux_ver = '' + linux_ver = "" router_list = tgen.routers() for rname in ROUTER_LIST: router = router_list[rname] # It will help in debugging the failures, will give more details on which # specific kernel version tests are failing - if linux_ver == '': + if linux_ver == "": linux_ver = router.run("uname -a") logger.info("Logging platform related details: \n %s \n", linux_ver) @@ -741,11 +743,10 @@ def start_topology(tgen, daemon=None): # Loading empty bgpd.conf file to router, to start the bgp deamon router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname)) - if daemon and 'ospfd' in daemon: + if daemon and "ospfd" in daemon: # Loading empty ospf.conf file to router, to start the bgp deamon router.load_config( - TopoRouter.RD_OSPF, - '{}/{}/ospfd.conf'.format(TMPDIR, rname) + TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname) ) # Starting routers logger.info("Starting all routers once topology is created") @@ -831,8 +832,8 @@ def topo_daemons(tgen, topo): ) for rtr in ROUTER_LIST: - if 'ospf' in topo['routers'][rtr] and 'ospfd' not in daemon_list: - daemon_list.append('ospfd') + if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list: + daemon_list.append("ospfd") return daemon_list @@ -1266,8 +1267,7 @@ def interface_status(tgen, topo, input_dict): return True -def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, - return_is_dict=False): +def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict=False): """ Retries function execution, if return is an errormsg or exception @@ -1279,11 +1279,10 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, """ def _retry(func): - @wraps(func) def func_retry(*args, **kwargs): - _wait = kwargs.pop('wait', wait) - _attempts = kwargs.pop('attempts', attempts) + _wait = kwargs.pop("wait", wait) + _attempts = kwargs.pop("attempts", attempts) _attempts = int(_attempts) expected = True if _attempts < 0: @@ -1293,19 +1292,21 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, logger.info("Waiting for [%s]s as initial delay", initial_wait) sleep(initial_wait) - _return_is_str = kwargs.pop('return_is_str', return_is_str) - _return_is_dict = kwargs.pop('return_is_str', return_is_dict) + _return_is_str = kwargs.pop("return_is_str", return_is_str) + _return_is_dict = kwargs.pop("return_is_dict", return_is_dict) for i in range(1, _attempts + 1): try: - _expected = kwargs.setdefault('expected', True) + _expected = kwargs.setdefault("expected", True) if _expected is False: expected = _expected - kwargs.pop('expected') + kwargs.pop("expected") ret = func(*args, **kwargs) logger.debug("Function returned %s", ret) if _return_is_str and isinstance(ret, bool) and _expected: return ret - if (isinstance(ret, str) or isinstance(ret, unicode)) and _expected is False: + if ( + isinstance(ret, str) or isinstance(ret, unicode) + ) and _expected is False: return ret if _return_is_dict and isinstance(ret, dict): return ret @@ -1316,17 +1317,17 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, except Exception as err: if _attempts == i and expected: generate_support_bundle() +
logger.info("Max number of attempts (%r) reached", _attempts) raise else: logger.info("Function returned %s", err) if i < _attempts: - logger.info("Retry [#%r] after sleeping for %ss" - % (i, _wait)) + logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait)) sleep(_wait) + func_retry._original = func return func_retry + return _retry @@ -1420,58 +1421,63 @@ def create_interfaces_cfg(tgen, topo, build=False): else: interface_data.append("ipv6 address {}\n".format(intf_addr)) - if 'ospf' in data: - ospf_data = data['ospf'] - if 'area' in ospf_data: - intf_ospf_area = c_data["links"][destRouterLink][ - "ospf"]["area"] + if "ospf" in data: + ospf_data = data["ospf"] + if "area" in ospf_data: + intf_ospf_area = c_data["links"][destRouterLink]["ospf"]["area"] if "delete" in data and data["delete"]: interface_data.append("no ip ospf area") else: - interface_data.append("ip ospf area {}".format( - intf_ospf_area - )) + interface_data.append( + "ip ospf area {}".format(intf_ospf_area) + ) - if "hello_interval" in ospf_data: - intf_ospf_hello = c_data["links"][destRouterLink][ - "ospf"]["hello_interval"] + if "hello_interval" in ospf_data: + intf_ospf_hello = c_data["links"][destRouterLink]["ospf"][ + "hello_interval" + ] if "delete" in data and data["delete"]: - interface_data.append("no ip ospf "\ - " hello-interval") + interface_data.append("no ip ospf " " hello-interval") else: - interface_data.append("ip ospf "\ - " hello-interval {}".format(intf_ospf_hello)) + interface_data.append( + "ip ospf " " hello-interval {}".format(intf_ospf_hello) + ) if "dead_interval" in ospf_data: - intf_ospf_dead = c_data["links"][destRouterLink][ - "ospf"]["dead_interval"] + intf_ospf_dead = c_data["links"][destRouterLink]["ospf"][ + "dead_interval" + ] if "delete" in data and data["delete"]: - interface_data.append("no ip ospf"\ - " dead-interval") + interface_data.append("no ip ospf" " dead-interval") else: - interface_data.append("ip ospf "\ - " dead-interval {}".format(intf_ospf_dead)) + interface_data.append( + "ip ospf " " dead-interval {}".format(intf_ospf_dead) + ) if "network" in ospf_data: - intf_ospf_nw = c_data["links"][destRouterLink][ - "ospf"]["network"] + intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][ + "network" + ] if "delete" in data and data["delete"]: - interface_data.append("no ip ospf"\ - " network {}".format(intf_ospf_nw)) + interface_data.append( + "no ip ospf" " network {}".format(intf_ospf_nw) + ) else: - interface_data.append("ip ospf"\ - " network {}".format(intf_ospf_nw)) + interface_data.append( + "ip ospf" " network {}".format(intf_ospf_nw) + ) if "priority" in ospf_data: - intf_ospf_nw = c_data["links"][destRouterLink][ - "ospf"]["priority"] + intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][ + "priority" + ] if "delete" in data and data["delete"]: - interface_data.append("no ip ospf"\ - " priority") + interface_data.append("no ip ospf" " priority") else: - interface_data.append("ip ospf"\ - " priority {}".format(intf_ospf_nw)) + interface_data.append( + "ip ospf" " priority {}".format(intf_ospf_nw) + ) result = create_common_configuration( tgen, c_router, interface_data, "interface_config", build=build ) @@ -3013,7 +3019,7 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None): for st_rt in ip_list: st_rt = str(ipaddress.ip_network(frr_unicode(st_rt))) - #st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + # st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) _addr_type = validate_ip_address(st_rt) if _addr_type != addr_type: @@ -3118,7 +3124,7 @@ 
def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None): nh_found = False for st_rt in ip_list: - #st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) + # st_rt = str(ipaddr.IPNetwork(unicode(st_rt))) st_rt = str(ipaddress.ip_network(frr_unicode(st_rt))) _addr_type = validate_ip_address(st_rt) @@ -4075,8 +4081,9 @@ def required_linux_kernel_version(required_version): """ system_kernel = platform.release() if version_cmp(system_kernel, required_version) < 0: - error_msg = ('These tests will not run on kernel "{}", ' - 'they require kernel >= {})'.format(system_kernel, - required_version )) + error_msg = ( + 'These tests will not run on kernel "{}", ' + "they require kernel >= {})".format(system_kernel, required_version) + ) return error_msg return True diff --git a/tests/topotests/lib/ltemplate.py b/tests/topotests/lib/ltemplate.py index 3c93e1ac5c..d211be8836 100644 --- a/tests/topotests/lib/ltemplate.py +++ b/tests/topotests/lib/ltemplate.py @@ -43,7 +43,8 @@ from mininet.topo import Topo customize = None -class LTemplate(): + +class LTemplate: test = None testdir = None scriptdir = None @@ -54,12 +55,12 @@ class LTemplate(): def __init__(self, test, testdir): global customize - customize = imp.load_source('customize', os.path.join(testdir, 'customize.py')) + customize = imp.load_source("customize", os.path.join(testdir, "customize.py")) self.test = test self.testdir = testdir self.scriptdir = testdir - self.logdir = '/tmp/topotests/{0}.test_{0}'.format(test) - logger.info('LTemplate: '+test) + self.logdir = "/tmp/topotests/{0}.test_{0}".format(test) + logger.info("LTemplate: " + test) def setup_module(self, mod): "Sets up the pytest environment" @@ -68,14 +69,14 @@ class LTemplate(): # ... and here it calls Mininet initialization functions. tgen.start_topology() - logger.info('Topology started') + logger.info("Topology started") try: self.prestarthooksuccess = customize.ltemplatePreRouterStartHook() except AttributeError: - #not defined + # not defined logger.debug("ltemplatePreRouterStartHook() not defined") if self.prestarthooksuccess != True: - logger.info('ltemplatePreRouterStartHook() failed, skipping test') + logger.info("ltemplatePreRouterStartHook() failed, skipping test") return # This is a sample of configuration loading. @@ -85,48 +86,57 @@ class LTemplate(): for rname, router in router_list.items(): logger.info("Setting up %s" % rname) for rd_val in TopoRouter.RD: - config = os.path.join(self.testdir, '{}/{}.conf'.format(rname,TopoRouter.RD[rd_val])) + config = os.path.join( + self.testdir, "{}/{}.conf".format(rname, TopoRouter.RD[rd_val]) + ) prog = os.path.join(tgen.net[rname].daemondir, TopoRouter.RD[rd_val]) if os.path.exists(config): if os.path.exists(prog): router.load_config(rd_val, config) else: - logger.warning("{} not found, but have {}.conf file".format(prog, TopoRouter.RD[rd_val])) + logger.warning( + "{} not found, but have {}.conf file".format( + prog, TopoRouter.RD[rd_val] + ) + ) # After loading the configurations, this function loads configured daemons. 
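(Note: the load_config loop above resolves one candidate config file per router and per daemon. A minimal sketch of the path convention it checks; testdir, the router name, and the daemon name below are hypothetical stand-ins:)

import os

# Hypothetical values standing in for LTemplate.testdir and one of the
# TopoRouter.RD daemon names such as "zebra" or "bgpd".
testdir = "/path/to/testdir"
rname, daemon = "r1", "zebra"

config = os.path.join(testdir, "{}/{}.conf".format(rname, daemon))
# config == "/path/to/testdir/r1/zebra.conf"; it is loaded only when both
# this file and the matching daemon binary exist, otherwise a warning is
# logged and that daemon is skipped.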
- logger.info('Starting routers') + logger.info("Starting routers") tgen.start_router() try: self.poststarthooksuccess = customize.ltemplatePostRouterStartHook() except AttributeError: - #not defined + # not defined logger.debug("ltemplatePostRouterStartHook() not defined") luStart(baseScriptDir=self.scriptdir, baseLogDir=self.logdir, net=tgen.net) -#initialized by ltemplate_start + +# initialized by ltemplate_start _lt = None + def setup_module(mod): global _lt root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - test = mod.__name__[:mod.__name__.rfind(".")] + test = mod.__name__[: mod.__name__.rfind(".")] testdir = os.path.join(root, test) - #don't do this for now as reload didn't work as expected - #fixup sys.path, want test dir there only once - #try: + # don't do this for now as reload didn't work as expected + # fixup sys.path, want test dir there only once + # try: # sys.path.remove(testdir) - #except ValueError: + # except ValueError: # logger.debug(testdir+" not found in original sys.path") - #add testdir - #sys.path.append(testdir) + # add testdir + # sys.path.append(testdir) - #init class + # init class _lt = LTemplate(test, testdir) _lt.setup_module(mod) - #drop testdir - #sys.path.remove(testdir) + # drop testdir + # sys.path.remove(testdir) + def teardown_module(mod): global _lt @@ -141,7 +151,10 @@ def teardown_module(mod): tgen.stop_topology() _lt = None -def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, KeepGoing=False): + +def ltemplateTest( + script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, KeepGoing=False +): global _lt if _lt == None or _lt.prestarthooksuccess != True: return @@ -149,8 +162,8 @@ def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, tgen = get_topogen() if not os.path.isfile(script): if not os.path.isfile(os.path.join(_lt.scriptdir, script)): - logger.error('Could not find script file: ' + script) - assert 'Could not find script file: ' + script + logger.error("Could not find script file: " + script) + assert "Could not find script file: " + script logger.info("Starting template test: " + script) numEntry = luNumFail() @@ -163,7 +176,7 @@ def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, if CheckFuncStr != None: check = eval(CheckFuncStr) if check != True: - pytest.skip("Check function '"+CheckFuncStr+"' returned: " + check) + pytest.skip("Check function '" + CheckFuncStr + "' returned: " + check) if CallOnFail != None: CallOnFail = eval(CallOnFail) @@ -173,22 +186,26 @@ def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, luShowFail() fatal_error = "%d tests failed" % numFail if not KeepGoing: - assert "scripts/cleanup_all.py failed" == "See summary output above", fatal_error + assert ( + "scripts/cleanup_all.py failed" == "See summary output above" + ), fatal_error + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." 
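    # Sketch note (assumption): reporting is gated on tgen.is_memleak_enabled(),
    # which these topotests normally switch on via the environment, so when it
    # is off the skip below is the expected path.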
    tgen = get_topogen()
     if not tgen.is_memleak_enabled():
-        pytest.skip('Memory leak test/report is disabled')
+        pytest.skip("Memory leak test/report is disabled")
 
     tgen.report_memory_leaks()
 
-class ltemplateRtrCmd():
+
+class ltemplateRtrCmd:
     def __init__(self):
         self.resetCounts()
 
-    def doCmd(self, tgen, rtr, cmd, checkstr = None):
+    def doCmd(self, tgen, rtr, cmd, checkstr=None):
         output = tgen.net[rtr].cmd(cmd).strip()
         if len(output):
             self.output += 1
@@ -199,8 +216,8 @@ class ltemplateRtrCmd():
             else:
                 self.match += 1
             return ret
-        logger.info('command: {} {}'.format(rtr, cmd))
-        logger.info('output: ' + output)
+        logger.info("command: {} {}".format(rtr, cmd))
+        logger.info("output: " + output)
         self.none += 1
         return None
 
@@ -222,63 +239,69 @@ class ltemplateRtrCmd():
     def getNone(self):
         return self.none
 
-def ltemplateVersionCheck(vstr, rname='r1', compstr='<',cli=False, kernel='4.9', iproute2=None, mpls=True):
+
+def ltemplateVersionCheck(
+    vstr, rname="r1", compstr="<", cli=False, kernel="4.9", iproute2=None, mpls=True
+):
     tgen = get_topogen()
     router = tgen.gears[rname]
 
     if cli:
-        logger.info('calling mininet CLI')
+        logger.info("calling mininet CLI")
         tgen.mininet_cli()
-        logger.info('exited mininet CLI')
+        logger.info("exited mininet CLI")
 
     if _lt == None:
-        ret = 'Template not initialized'
+        ret = "Template not initialized"
         return ret
     if _lt.prestarthooksuccess != True:
-        ret = 'ltemplatePreRouterStartHook failed'
+        ret = "ltemplatePreRouterStartHook failed"
         return ret
     if _lt.poststarthooksuccess != True:
-        ret = 'ltemplatePostRouterStartHook failed'
+        ret = "ltemplatePostRouterStartHook failed"
         return ret
 
     if mpls == True and tgen.hasmpls != True:
-        ret = 'MPLS not initialized'
+        ret = "MPLS not initialized"
        return ret
 
     if kernel != None:
         krel = platform.release()
         if topotest.version_cmp(krel, kernel) < 0:
-            ret = 'Skipping tests, old kernel ({} < {})'.format(krel, kernel)
+            ret = "Skipping tests, old kernel ({} < {})".format(krel, kernel)
             return ret
 
     if iproute2 != None:
         if _lt.iproute2Ver == None:
-            #collect/log info on iproute2
+            # collect/log info on iproute2
             cc = ltemplateRtrCmd()
-            found = cc.doCmd(tgen, rname, 'apt-cache policy iproute2', 'Installed: ([\d\.]*)')
+            found = cc.doCmd(
+                tgen, rname, "apt-cache policy iproute2", "Installed: ([\d\.]*)"
+            )
             if found != None:
                 iproute2Ver = found.group(1)
             else:
-                iproute2Ver = '0-unknown'
-            logger.info('Have iproute2 version=' + iproute2Ver)
+                iproute2Ver = "0-unknown"
+            logger.info("Have iproute2 version=" + iproute2Ver)
         if topotest.version_cmp(iproute2Ver, iproute2) < 0:
-            ret = 'Skipping tests, old iproute2 ({} < {})'.format(iproute2Ver, iproute2)
+            ret = "Skipping tests, old iproute2 ({} < {})".format(iproute2Ver, iproute2)
             return ret
 
     ret = True
     try:
         if router.has_version(compstr, vstr):
-            ret = 'Skipping tests, old FRR version {} {}'.format(compstr, vstr)
+            ret = "Skipping tests, old FRR version {} {}".format(compstr, vstr)
             return ret
     except:
         ret = True
 
     return ret
 
-#for testing
-if __name__ == '__main__':
+
+# for testing
+if __name__ == "__main__":
     args = ["-s"] + sys.argv[1:]
     sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/lutil.py b/tests/topotests/lib/lutil.py
index 05ed9c007d..1fb4f48b0f 100644
--- a/tests/topotests/lib/lutil.py
+++ b/tests/topotests/lib/lutil.py
@@ -32,46 +32,53 @@ from mininet.net import Mininet
 
 # These functions are intended to provide support for CI testing within Mininet
 # environments.
 
+
 class lUtil:
-    #to be made configurable in the future
-    base_script_dir = '.'
-    base_log_dir = '.'
- fout_name = 'output.log' - fsum_name = 'summary.txt' + # to be made configurable in the future + base_script_dir = "." + base_log_dir = "." + fout_name = "output.log" + fsum_name = "summary.txt" l_level = 6 CallOnFail = False l_total = 0 l_pass = 0 l_fail = 0 - l_filename = '' + l_filename = "" l_last = None l_line = 0 l_dotall_experiment = False l_last_nl = None - fout = '' - fsum = '' - net = '' + fout = "" + fsum = "" + net = "" def log(self, str, level=6): if self.l_level > 0: - if self.fout == '': - self.fout = open(self.fout_name, 'w', 0) - self.fout.write(str+'\n') + if self.fout == "": + self.fout = open(self.fout_name, "w", 0) + self.fout.write(str + "\n") if level <= self.l_level: print(str) def summary(self, str): - if self.fsum == '': - self.fsum = open(self.fsum_name, 'w', 0) - self.fsum.write('\ -******************************************************************************\n') - self.fsum.write('\ -Test Target Summary Pass Fail\n') - self.fsum.write('\ -******************************************************************************\n') - self.fsum.write(str+'\n') + if self.fsum == "": + self.fsum = open(self.fsum_name, "w", 0) + self.fsum.write( + "\ +******************************************************************************\n" + ) + self.fsum.write( + "\ +Test Target Summary Pass Fail\n" + ) + self.fsum.write( + "\ +******************************************************************************\n" + ) + self.fsum.write(str + "\n") def result(self, target, success, str, logstr=None): if success: @@ -88,32 +95,34 @@ Test Target Summary Pass Fail\n if logstr != None: self.log("R:%d %s: %s" % (self.l_total, sstr, logstr)) res = "%-4d %-6s %-56s %-4d %d" % (self.l_total, target, str, p, f) - self.log ('R:'+res) + self.log("R:" + res) self.summary(res) if f == 1 and self.CallOnFail != False: self.CallOnFail() def closeFiles(self): - ret = '\ + ret = ( + "\ ******************************************************************************\n\ Total %-4d %-4d %d\n\ -******************************************************************************'\ -% (self.l_total, self.l_pass, self.l_fail) - if self.fsum != '': - self.fsum.write(ret + '\n') +******************************************************************************" + % (self.l_total, self.l_pass, self.l_fail) + ) + if self.fsum != "": + self.fsum.write(ret + "\n") self.fsum.close() - self.fsum = '' - if self.fout != '': + self.fsum = "" + if self.fout != "": if os.path.isfile(self.fsum_name): - r = open(self.fsum_name, 'r') + r = open(self.fsum_name, "r") self.fout.write(r.read()) r.close() self.fout.close() - self.fout = '' + self.fout = "" return ret def setFilename(self, name): - str = 'FILE: ' + name + str = "FILE: " + name self.log(str) self.summary(str) self.l_filename = name @@ -128,19 +137,19 @@ Total %-4d %-4d %d\n\ def strToArray(self, string): a = [] c = 0 - end = '' + end = "" words = string.split() - if len(words) < 1 or words[0].startswith('#'): + if len(words) < 1 or words[0].startswith("#"): return a words = string.split() for word in words: if len(end) == 0: a.append(word) else: - a[c] += str(' '+word) - if end == '\\': - end = '' - if not word.endswith('\\'): + a[c] += str(" " + word) + if end == "\\": + end = "" + if not word.endswith("\\"): if end != '"': if word.startswith('"'): end = '"' @@ -148,14 +157,14 @@ Total %-4d %-4d %d\n\ c += 1 else: if word.endswith('"'): - end = '' + end = "" c += 1 else: c += 1 else: - end = '\\' - # if len(end) == 0: - # print('%d:%s:' % (c, a[c-1])) + end = "\\" + # if 
len(end) == 0: + # print('%d:%s:' % (c, a[c-1])) return a @@ -169,27 +178,37 @@ Total %-4d %-4d %d\n\ luCommand(a[1], a[2], a[3], a[4], a[5]) else: self.l_line += 1 - self.log('%s:%s %s' % (self.l_filename, self.l_line , line)) + self.log("%s:%s %s" % (self.l_filename, self.l_line, line)) if len(a) >= 2: - if a[0] == 'sleep': + if a[0] == "sleep": time.sleep(int(a[1])) - elif a[0] == 'include': + elif a[0] == "include": self.execTestFile(a[1]) f.close() else: - self.log('unable to read: ' + tstFile) + self.log("unable to read: " + tstFile) sys.exit(1) def command(self, target, command, regexp, op, result, returnJson): global net - if op != 'wait': - self.l_line += 1 - self.log('(#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:' % \ - (self.l_total+1, - self.l_filename, self.l_line, target, command, regexp, op, result)) - if self.net == '': + if op != "wait": + self.l_line += 1 + self.log( + "(#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:" + % ( + self.l_total + 1, + self.l_filename, + self.l_line, + target, + command, + regexp, + op, + result, + ) + ) + if self.net == "": return False - #self.log("Running %s %s" % (target, command)) + # self.log("Running %s %s" % (target, command)) js = None out = self.net[target].cmd(command).rstrip() if len(out) == 0: @@ -201,13 +220,15 @@ Total %-4d %-4d %d\n\ js = json.loads(out) except: js = None - self.log('WARNING: JSON load failed -- confirm command output is in JSON format.') - self.log('COMMAND OUTPUT:%s:' % report) + self.log( + "WARNING: JSON load failed -- confirm command output is in JSON format." + ) + self.log("COMMAND OUTPUT:%s:" % report) # Experiment: can we achieve the same match behavior via DOTALL # without converting newlines to spaces? out_nl = out - search_nl = re.search(regexp, out_nl, re.DOTALL); + search_nl = re.search(regexp, out_nl, re.DOTALL) self.l_last_nl = search_nl # Set up for comparison if search_nl != None: @@ -220,32 +241,50 @@ Total %-4d %-4d %d\n\ search = re.search(regexp, out) self.l_last = search if search == None: - if op == 'fail': + if op == "fail": success = True else: success = False ret = success else: ret = search.group() - if op != 'fail': + if op != "fail": success = True level = 7 else: success = False level = 5 - self.log('found:%s:' % ret, level) + self.log("found:%s:" % ret, level) # Experiment: compare matched strings obtained each way if self.l_dotall_experiment and (group_nl_converted != ret): - self.log('DOTALL experiment: strings differ dotall=[%s] orig=[%s]' % (group_nl_converted, ret), 9) - if op == 'pass' or op == 'fail': + self.log( + "DOTALL experiment: strings differ dotall=[%s] orig=[%s]" + % (group_nl_converted, ret), + 9, + ) + if op == "pass" or op == "fail": self.result(target, success, result) if js != None: return js return ret - def wait(self, target, command, regexp, op, result, wait, returnJson, wait_time=0.5): - self.log('%s:%s WAIT:%s:%s:%s:%s:%s:%s:%s:' % \ - (self.l_filename, self.l_line, target, command, regexp, op, result,wait,wait_time)) + def wait( + self, target, command, regexp, op, result, wait, returnJson, wait_time=0.5 + ): + self.log( + "%s:%s WAIT:%s:%s:%s:%s:%s:%s:%s:" + % ( + self.l_filename, + self.l_line, + target, + command, + regexp, + op, + result, + wait, + wait_time, + ) + ) found = False n = 0 startt = time.time() @@ -264,103 +303,137 @@ Total %-4d %-4d %d\n\ time.sleep(wait_time) delta = time.time() - startt - self.log('Done after %d loops, time=%s, Found=%s' % (n, delta, found)) - found = self.command(target, command, regexp, 'pass', '%s +%4.2f secs' % (result, delta), 
returnJson) + self.log("Done after %d loops, time=%s, Found=%s" % (n, delta, found)) + found = self.command( + target, + command, + regexp, + "pass", + "%s +%4.2f secs" % (result, delta), + returnJson, + ) return found -#initialized by luStart -LUtil=None -#entry calls -def luStart(baseScriptDir='.', baseLogDir='.', net='', - fout='output.log', fsum='summary.txt', level=None): +# initialized by luStart +LUtil = None + +# entry calls +def luStart( + baseScriptDir=".", + baseLogDir=".", + net="", + fout="output.log", + fsum="summary.txt", + level=None, +): global LUtil - #init class - LUtil=lUtil() + # init class + LUtil = lUtil() LUtil.base_script_dir = baseScriptDir LUtil.base_log_dir = baseLogDir LUtil.net = net - if fout != '': - LUtil.fout_name = baseLogDir + '/' + fout + if fout != "": + LUtil.fout_name = baseLogDir + "/" + fout if fsum != None: - LUtil.fsum_name = baseLogDir + '/' + fsum + LUtil.fsum_name = baseLogDir + "/" + fsum if level != None: LUtil.l_level = level LUtil.l_dotall_experiment = False LUtil.l_dotall_experiment = True -def luCommand(target, command, regexp='.', op='none', result='', time=10, returnJson=False, wait_time=0.5): - if op != 'wait': + +def luCommand( + target, + command, + regexp=".", + op="none", + result="", + time=10, + returnJson=False, + wait_time=0.5, +): + if op != "wait": return LUtil.command(target, command, regexp, op, result, returnJson) else: - return LUtil.wait(target, command, regexp, op, result, time, returnJson, wait_time) + return LUtil.wait( + target, command, regexp, op, result, time, returnJson, wait_time + ) + def luLast(usenl=False): if usenl: if LUtil.l_last_nl != None: - LUtil.log('luLast:%s:' % LUtil.l_last_nl.group(), 7) + LUtil.log("luLast:%s:" % LUtil.l_last_nl.group(), 7) return LUtil.l_last_nl else: if LUtil.l_last != None: - LUtil.log('luLast:%s:' % LUtil.l_last.group(), 7) + LUtil.log("luLast:%s:" % LUtil.l_last.group(), 7) return LUtil.l_last + def luInclude(filename, CallOnFail=None): - tstFile = LUtil.base_script_dir + '/' + filename + tstFile = LUtil.base_script_dir + "/" + filename LUtil.setFilename(filename) if CallOnFail != None: oldCallOnFail = LUtil.getCallOnFail() LUtil.setCallOnFail(CallOnFail) - if filename.endswith('.py'): - LUtil.log("luInclude: execfile "+tstFile) + if filename.endswith(".py"): + LUtil.log("luInclude: execfile " + tstFile) execfile(tstFile) else: - LUtil.log("luInclude: execTestFile "+tstFile) + LUtil.log("luInclude: execTestFile " + tstFile) LUtil.execTestFile(tstFile) if CallOnFail != None: LUtil.setCallOnFail(oldCallOnFail) + def luFinish(): global LUtil ret = LUtil.closeFiles() - #done + # done LUtil = None - return ret; + return ret + def luNumFail(): return LUtil.l_fail + def luNumPass(): return LUtil.l_pass + def luResult(target, success, str, logstr=None): return LUtil.result(target, success, str, logstr) + def luShowResults(prFunction): printed = 0 - sf = open(LUtil.fsum_name, 'r') + sf = open(LUtil.fsum_name, "r") for line in sf: - printed+=1 + printed += 1 prFunction(line.rstrip()) sf.close() + def luShowFail(): printed = 0 - sf = open(LUtil.fsum_name, 'r') + sf = open(LUtil.fsum_name, "r") for line in sf: if line[-2] != "0": - printed+=1 + printed += 1 logger.error(line.rstrip()) sf.close() if printed > 0: - logger.error("See %s for details of errors" % LUtil.fout_name) + logger.error("See %s for details of errors" % LUtil.fout_name) -#for testing -if __name__ == '__main__': - print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/lib') + +# for testing +if __name__ 
== "__main__": + print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/lib") luStart() for arg in sys.argv[1:]: luInclude(arg) luFinish() sys.exit(0) - diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py index 9d6b8fa691..9f3d4841b0 100644 --- a/tests/topotests/lib/ospf.py +++ b/tests/topotests/lib/ospf.py @@ -26,12 +26,15 @@ import ipaddr from lib.topotest import frr_unicode # Import common_config to use commomnly used APIs -from lib.common_config import (create_common_configuration, - InvalidCLIError, retry, - generate_ips, - check_address_types, - validate_ip_address, - run_frr_cmd) +from lib.common_config import ( + create_common_configuration, + InvalidCLIError, + retry, + generate_ips, + check_address_types, + validate_ip_address, + run_frr_cmd, +) LOGDIR = "/tmp/topotests/" TMPDIR = None @@ -40,9 +43,8 @@ TMPDIR = None # Configure procs ################################ -def create_router_ospf( - tgen, topo, input_dict=None, build=False, - load_config=True): + +def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=True): """ API to configure ospf on router. @@ -84,19 +86,15 @@ def create_router_ospf( logger.debug("Router %s: 'ospf' not present in input_dict", router) continue - result = __create_ospf_global( - tgen, input_dict, router, build, load_config) + result = __create_ospf_global(tgen, input_dict, router, build, load_config) if result is True: ospf_data = input_dict[router]["ospf"] - logger.debug("Exiting lib API: create_router_ospf()") return result -def __create_ospf_global( - tgen, input_dict, router, build=False, - load_config=True): +def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True): """ Helper API to create ospf global configuration. @@ -121,9 +119,9 @@ def __create_ospf_global( del_ospf_action = ospf_data.setdefault("delete", False) if del_ospf_action: config_data = ["no router ospf"] - result = create_common_configuration(tgen, router, config_data, - "ospf", build, - load_config) + result = create_common_configuration( + tgen, router, config_data, "ospf", build, load_config + ) return result config_data = [] @@ -137,34 +135,33 @@ def __create_ospf_global( if del_router_id: config_data.append("no ospf router-id") if router_id: - config_data.append("ospf router-id {}".format( - router_id)) + config_data.append("ospf router-id {}".format(router_id)) # redistribute command redistribute_data = ospf_data.setdefault("redistribute", {}) if redistribute_data: for redistribute in redistribute_data: if "redist_type" not in redistribute: - logger.debug("Router %s: 'redist_type' not present in " - "input_dict", router) + logger.debug( + "Router %s: 'redist_type' not present in " "input_dict", router + ) else: - cmd = "redistribute {}".format( - redistribute["redist_type"]) + cmd = "redistribute {}".format(redistribute["redist_type"]) for red_type in redistribute_data: if "route_map" in red_type: - cmd = cmd + " route-map {}".format(red_type[ - 'route_map']) + cmd = cmd + " route-map {}".format(red_type["route_map"]) del_action = redistribute.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - #area information + # area information area_data = ospf_data.setdefault("area", {}) if area_data: for area in area_data: if "id" not in area: - logger.debug("Router %s: 'area id' not present in " - "input_dict", router) + logger.debug( + "Router %s: 'area id' not present in " "input_dict", router + ) else: cmd = "area {}".format(area["id"]) @@ -175,19 +172,21 @@ 
def __create_ospf_global( if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration(tgen, router, config_data, - "ospf", build, load_config) + result = create_common_configuration( + tgen, router, config_data, "ospf", build, load_config + ) # summary information summary_data = ospf_data.setdefault("summary-address", {}) if summary_data: for summary in summary_data: if "prefix" not in summary: - logger.debug("Router %s: 'summary-address' not present in " - "input_dict", router) + logger.debug( + "Router %s: 'summary-address' not present in " "input_dict", + router, + ) else: - cmd = "summary {}/{}".format(summary["prefix"], summary[ - "mask"]) + cmd = "summary {}/{}".format(summary["prefix"], summary["mask"]) _tag = summary.setdefault("tag", None) if _tag: @@ -201,8 +200,9 @@ def __create_ospf_global( if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration(tgen, router, config_data, - "ospf", build, load_config) + result = create_common_configuration( + tgen, router, config_data, "ospf", build, load_config + ) except InvalidCLIError: # Traceback @@ -214,9 +214,7 @@ def __create_ospf_global( return result -def create_router_ospf6( - tgen, topo, input_dict=None, build=False, - load_config=True): +def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=True): """ API to configure ospf on router @@ -253,16 +251,13 @@ def create_router_ospf6( logger.debug("Router %s: 'ospf' not present in input_dict", router) continue - result = __create_ospf_global( - tgen, input_dict, router, build, load_config) + result = __create_ospf_global(tgen, input_dict, router, build, load_config) logger.debug("Exiting lib API: create_router_ospf()") return result -def __create_ospf6_global( - tgen, input_dict, router, build=False, - load_config=True): +def __create_ospf6_global(tgen, input_dict, router, build=False, load_config=True): """ Helper API to create ospf global configuration. @@ -286,9 +281,9 @@ def __create_ospf6_global( del_ospf_action = ospf_data.setdefault("delete", False) if del_ospf_action: config_data = ["no ipv6 router ospf"] - result = create_common_configuration(tgen, router, config_data, - "ospf", build, - load_config) + result = create_common_configuration( + tgen, router, config_data, "ospf", build, load_config + ) return result config_data = [] @@ -301,11 +296,11 @@ def __create_ospf6_global( if del_router_id: config_data.append("no ospf router-id") if router_id: - config_data.append("ospf router-id {}".format( - router_id)) + config_data.append("ospf router-id {}".format(router_id)) - result = create_common_configuration(tgen, router, config_data, - "ospf", build, load_config) + result = create_common_configuration( + tgen, router, config_data, "ospf", build, load_config + ) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -315,8 +310,8 @@ def __create_ospf6_global( logger.debug("Exiting lib API: create_ospf_global()") return result -def config_ospf_interface (tgen, topo, input_dict=None, build=False, - load_config=True): + +def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=True): """ API to configure ospf on router. 
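(For orientation before the body in the next hunk: a minimal sketch of the input_dict shape this API consumes; the router and link names are hypothetical, and only keys the function actually reads are shown:)

# Hypothetical example input for config_ospf_interface(); keys mirror the
# ones read from input_dict[router]["links"][link]["ospf"] in the body.
input_dict = {
    "r1": {
        "links": {
            "r2": {
                "ospf": {
                    "area": "0.0.0.0",  # emits "ip ospf area 0.0.0.0"
                    "authentication": "message-digest",
                    "priority": 98,     # emits "ip ospf priority 98"
                    "cost": 10,         # emits "ip ospf cost 10"
                }
            }
        }
    }
}
# config_ospf_interface(tgen, topo, input_dict) then pushes the resulting
# "interface <name>" block to router r1.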
@@ -356,22 +351,25 @@ def config_ospf_interface (tgen, topo, input_dict=None, build=False, input_dict = deepcopy(input_dict) for router in input_dict.keys(): config_data = [] - for lnk in input_dict[router]['links'].keys(): - if "ospf" not in input_dict[router]['links'][lnk]: - logger.debug("Router %s: ospf configs is not present in" - "input_dict, passed input_dict", router, - input_dict) + for lnk in input_dict[router]["links"].keys(): + if "ospf" not in input_dict[router]["links"][lnk]: + logger.debug( + "Router %s: ospf configs is not present in" + "input_dict, passed input_dict", + router, + input_dict, + ) continue - ospf_data = input_dict[router]['links'][lnk]['ospf'] + ospf_data = input_dict[router]["links"][lnk]["ospf"] data_ospf_area = ospf_data.setdefault("area", None) data_ospf_auth = ospf_data.setdefault("authentication", None) data_ospf_dr_priority = ospf_data.setdefault("priority", None) data_ospf_cost = ospf_data.setdefault("cost", None) try: - intf = topo['routers'][router]['links'][lnk]['interface'] + intf = topo["routers"][router]["links"][lnk]["interface"] except KeyError: - intf = topo['switches'][router]['links'][lnk]['interface'] + intf = topo["switches"][router]["links"][lnk]["interface"] # interface cmd = "interface {}".format(intf) @@ -383,58 +381,60 @@ def config_ospf_interface (tgen, topo, input_dict=None, build=False, config_data.append(cmd) # interface ospf auth if data_ospf_auth: - if data_ospf_auth == 'null': + if data_ospf_auth == "null": cmd = "ip ospf authentication null" - elif data_ospf_auth == 'message-digest': + elif data_ospf_auth == "message-digest": cmd = "ip ospf authentication message-digest" else: cmd = "ip ospf authentication" - if 'del_action' in ospf_data: + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) if "message-digest-key" in ospf_data: cmd = "ip ospf message-digest-key {} md5 {}".format( - ospf_data["message-digest-key"],ospf_data[ - "authentication-key"]) - if 'del_action' in ospf_data: + ospf_data["message-digest-key"], ospf_data["authentication-key"] + ) + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) - if "authentication-key" in ospf_data and \ - "message-digest-key" not in ospf_data: - cmd = "ip ospf authentication-key {}".format(ospf_data[ - "authentication-key"]) - if 'del_action' in ospf_data: + if ( + "authentication-key" in ospf_data + and "message-digest-key" not in ospf_data + ): + cmd = "ip ospf authentication-key {}".format( + ospf_data["authentication-key"] + ) + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf dr priority if data_ospf_dr_priority in ospf_data: - cmd = "ip ospf priority {}".format( - ospf_data["priority"]) - if 'del_action' in ospf_data: + cmd = "ip ospf priority {}".format(ospf_data["priority"]) + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf cost if data_ospf_cost in ospf_data: - cmd = "ip ospf cost {}".format( - ospf_data["cost"]) - if 'del_action' in ospf_data: + cmd = "ip ospf cost {}".format(ospf_data["cost"]) + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) if build: return config_data else: - result = create_common_configuration(tgen, router, config_data, - "interface_config", - build=build) + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=build + ) logger.debug("Exiting lib API: create_igmp_config()") return result + def clear_ospf(tgen, router): """ This 
API is to clear ospf neighborship by running @@ -517,15 +517,16 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): result = False if input_dict: for router, rnode in tgen.routers().items(): - if 'ospf' not in topo['routers'][router]: + if "ospf" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF neighborship on router %s:", router) - show_ospf_json = run_frr_cmd(rnode, - "show ip ospf neighbor all json", isjson=True) + show_ospf_json = run_frr_cmd( + rnode, "show ip ospf neighbor all json", isjson=True + ) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): @@ -533,126 +534,134 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): return errormsg ospf_data_list = input_dict[router]["ospf"] - ospf_nbr_list = ospf_data_list['neighbors'] + ospf_nbr_list = ospf_data_list["neighbors"] for ospf_nbr, nbr_data in ospf_nbr_list.items(): - data_ip = topo['routers'][ospf_nbr]['links'] - data_rid = topo['routers'][ospf_nbr]['ospf']['router_id'] + data_ip = topo["routers"][ospf_nbr]["links"] + data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"] if ospf_nbr in data_ip: nbr_details = nbr_data[ospf_nbr] elif lan: - for switch in topo['switches']: - if 'ospf' in topo['switches'][switch]['links'][router]: - neighbor_ip = data_ip[switch]['ipv4'].split("/")[0] + for switch in topo["switches"]: + if "ospf" in topo["switches"][switch]["links"][router]: + neighbor_ip = data_ip[switch]["ipv4"].split("/")[0] else: continue else: - neighbor_ip = data_ip[router]['ipv4'].split("/")[0] + neighbor_ip = data_ip[router]["ipv4"].split("/")[0] nh_state = None neighbor_ip = neighbor_ip.lower() nbr_rid = data_rid try: - nh_state = show_ospf_json[nbr_rid][0][ - 'state'].split('/')[0] - intf_state = show_ospf_json[nbr_rid][0][ - 'state'].split('/')[1] + nh_state = show_ospf_json[nbr_rid][0]["state"].split("/")[0] + intf_state = show_ospf_json[nbr_rid][0]["state"].split("/")[1] except KeyError: - errormsg = "[DUT: {}] OSPF peer {} missing".format(router, - nbr_rid) + errormsg = "[DUT: {}] OSPF peer {} missing".format(router, nbr_rid) return errormsg - nbr_state = nbr_data.setdefault("state",None) - nbr_role = nbr_data.setdefault("role",None) + nbr_state = nbr_data.setdefault("state", None) + nbr_role = nbr_data.setdefault("role", None) if nbr_state: if nbr_state == nh_state: - logger.info("[DUT: {}] OSPF Nbr is {}:{} State {}".format - (router, ospf_nbr, nbr_rid, nh_state)) + logger.info( + "[DUT: {}] OSPF Nbr is {}:{} State {}".format( + router, ospf_nbr, nbr_rid, nh_state + ) + ) result = True else: - errormsg = ("[DUT: {}] OSPF is not Converged, neighbor" - " state is {}".format(router, nh_state)) + errormsg = ( + "[DUT: {}] OSPF is not Converged, neighbor" + " state is {}".format(router, nh_state) + ) return errormsg if nbr_role: if nbr_role == intf_state: - logger.info("[DUT: {}] OSPF Nbr is {}: {} Role {}".format( - router, ospf_nbr, nbr_rid, nbr_role)) + logger.info( + "[DUT: {}] OSPF Nbr is {}: {} Role {}".format( + router, ospf_nbr, nbr_rid, nbr_role + ) + ) else: - errormsg = ("[DUT: {}] OSPF is not Converged with rid" - "{}, role is {}".format(router, nbr_rid, intf_state)) + errormsg = ( + "[DUT: {}] OSPF is not Converged with rid" + "{}, role is {}".format(router, nbr_rid, intf_state) + ) return errormsg continue else: for router, rnode in tgen.routers().items(): - if 'ospf' not in topo['routers'][router]: + if "ospf" not in topo["routers"][router]: 
continue if dut is not None and dut != router: continue logger.info("Verifying OSPF neighborship on router %s:", router) - show_ospf_json = run_frr_cmd(rnode, - "show ip ospf neighbor all json", isjson=True) + show_ospf_json = run_frr_cmd( + rnode, "show ip ospf neighbor all json", isjson=True + ) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" return errormsg ospf_data_list = topo["routers"][router]["ospf"] - ospf_neighbors = ospf_data_list['neighbors'] + ospf_neighbors = ospf_data_list["neighbors"] total_peer = 0 total_peer = len(ospf_neighbors.keys()) no_of_ospf_nbr = 0 - ospf_nbr_list = ospf_data_list['neighbors'] + ospf_nbr_list = ospf_data_list["neighbors"] no_of_peer = 0 for ospf_nbr, nbr_data in ospf_nbr_list.items(): if nbr_data: - data_ip = topo['routers'][nbr_data["nbr"]]['links'] - data_rid = topo['routers'][nbr_data["nbr"]][ - 'ospf']['router_id'] + data_ip = topo["routers"][nbr_data["nbr"]]["links"] + data_rid = topo["routers"][nbr_data["nbr"]]["ospf"]["router_id"] else: - data_ip = topo['routers'][ospf_nbr]['links'] - data_rid = topo['routers'][ospf_nbr]['ospf']['router_id'] + data_ip = topo["routers"][ospf_nbr]["links"] + data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"] if ospf_nbr in data_ip: nbr_details = nbr_data[ospf_nbr] elif lan: - for switch in topo['switches']: - if 'ospf' in topo['switches'][switch]['links'][router]: - neighbor_ip = data_ip[switch]['ipv4'].split("/")[0] + for switch in topo["switches"]: + if "ospf" in topo["switches"][switch]["links"][router]: + neighbor_ip = data_ip[switch]["ipv4"].split("/")[0] else: continue else: - neighbor_ip = data_ip[router]['ipv4'].split("/")[0] + neighbor_ip = data_ip[router]["ipv4"].split("/")[0] nh_state = None neighbor_ip = neighbor_ip.lower() nbr_rid = data_rid try: - nh_state = show_ospf_json[nbr_rid][0][ - 'state'].split('/')[0] + nh_state = show_ospf_json[nbr_rid][0]["state"].split("/")[0] except KeyError: - errormsg = "[DUT: {}] OSPF peer {} missing,from "\ - "{} ".format(router, - nbr_rid, ospf_nbr) + errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format( + router, nbr_rid, ospf_nbr + ) return errormsg - if nh_state == 'Full': + if nh_state == "Full": no_of_peer += 1 if no_of_peer == total_peer: logger.info("[DUT: {}] OSPF is Converged".format(router)) result = True else: - errormsg = ("[DUT: {}] OSPF is not Converged".format(router)) + errormsg = "[DUT: {}] OSPF is not Converged".format(router) return errormsg logger.debug("Exiting API: verify_ospf_neighbor()") return result + @retry(attempts=21, wait=2, return_is_str=True) -def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, - tag=None, metric=None, fib=None): +def verify_ospf_rib( + tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None +): """ This API is to verify ospf routes by running show ip ospf route command. 
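(A usage sketch for this API; the router name, prefix, and next hop below are illustrative, and the per-call retry behavior comes from the @retry wrapper above:)

# Hypothetical call site following the static_routes convention used
# throughout these libraries.
input_dict = {
    "r1": {
        "static_routes": [
            {"network": "10.0.20.1/32", "no_of_ip": 1}
        ]
    }
}
result = verify_ospf_rib(tgen, "r1", input_dict, next_hop="10.0.3.2")
# True means every generated prefix was found with the expected next hop;
# a string return is the error message (the @retry wrapper re-runs the
# check up to its attempts limit before giving up).
assert result is True, "Testcase failed: {}".format(result)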
@@ -706,25 +715,28 @@ def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, found_routes = [] missing_routes = [] - if "static_routes" in input_dict[routerInput] or \ - "prefix" in input_dict[routerInput]: + if ( + "static_routes" in input_dict[routerInput] + or "prefix" in input_dict[routerInput] + ): if "prefix" in input_dict[routerInput]: static_routes = input_dict[routerInput]["prefix"] else: static_routes = input_dict[routerInput]["static_routes"] - for static_route in static_routes: cmd = "{}".format(command) cmd = "{} json".format(cmd) - ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True) + ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True) # Verifying output dictionary ospf_rib_json is not empty if bool(ospf_rib_json) is False: - errormsg = "[DUT: {}] No routes found in OSPF route " \ + errormsg = ( + "[DUT: {}] No routes found in OSPF route " "table".format(router) + ) return errormsg network = static_route["network"] @@ -732,7 +744,6 @@ def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, _tag = static_route.setdefault("tag", None) _rtype = static_route.setdefault("routeType", None) - # Generating IPs for verification ip_list = generate_ips(network, no_of_ip) st_found = False @@ -742,7 +753,7 @@ def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, st_rt = str(ipaddr.IPNetwork(frr_unicode(st_rt))) _addr_type = validate_ip_address(st_rt) - if _addr_type != 'ipv4': + if _addr_type != "ipv4": continue if st_rt in ospf_rib_json: @@ -754,17 +765,26 @@ def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, next_hop = [next_hop] for mnh in range(0, len(ospf_rib_json[st_rt])): - if 'fib' in ospf_rib_json[st_rt][ - mnh]["nexthops"][0]: - found_hops.append([rib_r[ - "ip"] for rib_r in ospf_rib_json[ - st_rt][mnh]["nexthops"]]) + if ( + "fib" + in ospf_rib_json[st_rt][mnh]["nexthops"][0] + ): + found_hops.append( + [ + rib_r["ip"] + for rib_r in ospf_rib_json[st_rt][mnh][ + "nexthops" + ] + ] + ) if found_hops[0]: - missing_list_of_nexthops = \ - set(found_hops[0]).difference(next_hop) - additional_nexthops_in_required_nhs = \ - set(next_hop).difference(found_hops[0]) + missing_list_of_nexthops = set( + found_hops[0] + ).difference(next_hop) + additional_nexthops_in_required_nhs = set( + next_hop + ).difference(found_hops[0]) if additional_nexthops_in_required_nhs: logger.info( @@ -772,13 +792,18 @@ def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, "%s is not active for route %s in " "RIB of router %s\n", additional_nexthops_in_required_nhs, - st_rt, dut) + st_rt, + dut, + ) errormsg = ( "Nexthop {} is not active" " for route {} in RIB of router" " {}\n".format( - additional_nexthops_in_required_nhs, - st_rt, dut)) + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) return errormsg else: nh_found = True @@ -786,99 +811,111 @@ def verify_ospf_rib(tgen, dut, input_dict, next_hop=None, elif next_hop and fib is None: if type(next_hop) is not list: next_hop = [next_hop] - found_hops = [rib_r["ip"] for rib_r in - ospf_rib_json[st_rt][ - "nexthops"]] + found_hops = [ + rib_r["ip"] + for rib_r in ospf_rib_json[st_rt]["nexthops"] + ] if found_hops: - missing_list_of_nexthops = \ - set(found_hops).difference(next_hop) - additional_nexthops_in_required_nhs = \ - set(next_hop).difference(found_hops) + missing_list_of_nexthops = set( + found_hops + ).difference(next_hop) + additional_nexthops_in_required_nhs = set( + next_hop + ).difference(found_hops) if additional_nexthops_in_required_nhs: logger.info( - "Missing nexthop %s for route"\ - " %s in RIB of router 
%s\n", \ - additional_nexthops_in_required_nhs, \ - st_rt, dut) - errormsg=("Nexthop {} is Missing for "\ - "route {} in RIB of router {}\n".format( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", additional_nexthops_in_required_nhs, - st_rt, dut)) + st_rt, + dut, + ) + errormsg = ( + "Nexthop {} is Missing for " + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) return errormsg else: nh_found = True if _rtype: - if "routeType" not in ospf_rib_json[ - st_rt]: - errormsg = ("[DUT: {}]: routeType missing" - "for route {} in OSPF RIB \n".\ - format(dut, st_rt)) + if "routeType" not in ospf_rib_json[st_rt]: + errormsg = ( + "[DUT: {}]: routeType missing" + "for route {} in OSPF RIB \n".format(dut, st_rt) + ) return errormsg - elif _rtype != ospf_rib_json[st_rt][ - "routeType"]: - errormsg = ("[DUT: {}]: routeType mismatch" - "for route {} in OSPF RIB \n".\ - format(dut, st_rt)) + elif _rtype != ospf_rib_json[st_rt]["routeType"]: + errormsg = ( + "[DUT: {}]: routeType mismatch" + "for route {} in OSPF RIB \n".format(dut, st_rt) + ) return errormsg else: - logger.info("DUT: {}]: Found routeType {}" - "for route {}".\ - format(dut, _rtype, st_rt)) + logger.info( + "DUT: {}]: Found routeType {}" + "for route {}".format(dut, _rtype, st_rt) + ) if tag: - if "tag" not in ospf_rib_json[ - st_rt]: - errormsg = ("[DUT: {}]: tag is not" - " present for" - " route {} in RIB \n".\ - format(dut, st_rt - )) + if "tag" not in ospf_rib_json[st_rt]: + errormsg = ( + "[DUT: {}]: tag is not" + " present for" + " route {} in RIB \n".format(dut, st_rt) + ) return errormsg - if _tag != ospf_rib_json[ - st_rt]["tag"]: - errormsg = ("[DUT: {}]: tag value {}" - " is not matched for" - " route {} in RIB \n".\ - format(dut, _tag, st_rt, - )) + if _tag != ospf_rib_json[st_rt]["tag"]: + errormsg = ( + "[DUT: {}]: tag value {}" + " is not matched for" + " route {} in RIB \n".format(dut, _tag, st_rt,) + ) return errormsg if metric is not None: - if "type2cost" not in ospf_rib_json[ - st_rt]: - errormsg = ("[DUT: {}]: metric is" - " not present for" - " route {} in RIB \n".\ - format(dut, st_rt)) + if "type2cost" not in ospf_rib_json[st_rt]: + errormsg = ( + "[DUT: {}]: metric is" + " not present for" + " route {} in RIB \n".format(dut, st_rt) + ) return errormsg - if metric != ospf_rib_json[ - st_rt]["type2cost"]: - errormsg = ("[DUT: {}]: metric value " - "{} is not matched for " - "route {} in RIB \n".\ - format(dut, metric, st_rt, - )) + if metric != ospf_rib_json[st_rt]["type2cost"]: + errormsg = ( + "[DUT: {}]: metric value " + "{} is not matched for " + "route {} in RIB \n".format(dut, metric, st_rt,) + ) return errormsg else: missing_routes.append(st_rt) if nh_found: - logger.info("[DUT: {}]: Found next_hop {} for all OSPF" - " routes in RIB".format(router, next_hop)) + logger.info( + "[DUT: {}]: Found next_hop {} for all OSPF" + " routes in RIB".format(router, next_hop) + ) if len(missing_routes) > 0: - errormsg = ("[DUT: {}]: Missing route in RIB, " - "routes: {}".\ - format(dut, missing_routes)) + errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format( + dut, missing_routes + ) return errormsg if found_routes: - logger.info("[DUT: %s]: Verified routes in RIB, found" - " routes are: %s\n", dut, found_routes) + logger.info( + "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n", + dut, + found_routes, + ) result = True logger.info("Exiting lib API: verify_ospf_rib()") @@ -886,7 +923,7 @@ def verify_ospf_rib(tgen, dut, 
input_dict, next_hop=None, @retry(attempts=10, wait=2, return_is_str=True) -def verify_ospf_interface(tgen, topo, dut=None,lan=False, input_dict=None): +def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None): """ This API is to verify ospf routes by running show ip ospf interface command. @@ -928,15 +965,14 @@ def verify_ospf_interface(tgen, topo, dut=None,lan=False, input_dict=None): logger.debug("Entering lib API: verify_ospf_interface()") result = False for router, rnode in tgen.routers().items(): - if 'ospf' not in topo['routers'][router]: + if "ospf" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF interface on router %s:", router) - show_ospf_json = run_frr_cmd(rnode, "show ip ospf interface json", - isjson=True) + show_ospf_json = run_frr_cmd(rnode, "show ip ospf interface json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): @@ -946,19 +982,29 @@ def verify_ospf_interface(tgen, topo, dut=None,lan=False, input_dict=None): # To find neighbor ip type ospf_intf_data = input_dict[router]["links"] for ospf_intf, intf_data in ospf_intf_data.items(): - intf = topo['routers'][router]['links'][ospf_intf]['interface'] - if intf in show_ospf_json['interfaces']: - for intf_attribute in intf_data['ospf']: - if intf_data['ospf'][intf_attribute] == show_ospf_json[ - 'interfaces'][intf][intf_attribute]: - logger.info("[DUT: %s] OSPF interface %s: %s is %s", - router, intf, intf_attribute, intf_data['ospf'][ - intf_attribute]) + intf = topo["routers"][router]["links"][ospf_intf]["interface"] + if intf in show_ospf_json["interfaces"]: + for intf_attribute in intf_data["ospf"]: + if ( + intf_data["ospf"][intf_attribute] + == show_ospf_json["interfaces"][intf][intf_attribute] + ): + logger.info( + "[DUT: %s] OSPF interface %s: %s is %s", + router, + intf, + intf_attribute, + intf_data["ospf"][intf_attribute], + ) else: - errormsg= "[DUT: {}] OSPF interface {}: {} is {}, \ - Expected is {}".format(router, intf, intf_attribute, - intf_data['ospf'][intf_attribute], show_ospf_json[ - 'interfaces'][intf][intf_attribute]) + errormsg = "[DUT: {}] OSPF interface {}: {} is {}, \ + Expected is {}".format( + router, + intf, + intf_attribute, + intf_data["ospf"][intf_attribute], + show_ospf_json["interfaces"][intf][intf_attribute], + ) return errormsg result = True logger.debug("Exiting API: verify_ospf_interface()") @@ -1016,16 +1062,14 @@ def verify_ospf_database(tgen, topo, dut, input_dict): router = dut logger.debug("Entering lib API: verify_ospf_database()") - if 'ospf' not in topo['routers'][dut]: - errormsg = "[DUT: {}] OSPF is not configured on the router.".format( - dut) + if "ospf" not in topo["routers"][dut]: + errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut) return errormsg rnode = tgen.routers()[dut] logger.info("Verifying OSPF interface on router %s:", dut) - show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", - isjson=True) + show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" @@ -1033,82 +1077,103 @@ def verify_ospf_database(tgen, topo, dut, input_dict): # for inter and inter lsa's ospf_db_data = input_dict.setdefault("areas", None) - ospf_external_lsa = input_dict.setdefault( - 'AS External Link States', None) + ospf_external_lsa = input_dict.setdefault("AS External Link 
States", None) if ospf_db_data: - for ospf_area, area_lsa in ospf_db_data.items(): - if ospf_area in show_ospf_json['areas']: - if 'Router Link States' in area_lsa: - for lsa in area_lsa['Router Link States']: - if lsa in show_ospf_json['areas'][ospf_area][ - 'Router Link States']: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Router " - "LSA %s", router, ospf_area, lsa) - result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + for ospf_area, area_lsa in ospf_db_data.items(): + if ospf_area in show_ospf_json["areas"]: + if "Router Link States" in area_lsa: + for lsa in area_lsa["Router Link States"]: + if ( + lsa + in show_ospf_json["areas"][ospf_area]["Router Link States"] + ): + logger.info( + "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s", + router, + ospf_area, + lsa, + ) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Router LSA is {}".format(router, ospf_area, lsa) - return errormsg - if 'Net Link States' in area_lsa: - for lsa in area_lsa['Net Link States']: - if lsa in show_ospf_json['areas'][ospf_area][ - 'Net Link States']: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Network " - "LSA %s", router, ospf_area, lsa) - result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + ) + return errormsg + if "Net Link States" in area_lsa: + for lsa in area_lsa["Net Link States"]: + if lsa in show_ospf_json["areas"][ospf_area]["Net Link States"]: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s", + router, + ospf_area, + lsa, + ) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Network LSA is {}".format(router, ospf_area, lsa) - return errormsg - if 'Summary Link States' in area_lsa: - for lsa in area_lsa['Summary Link States']: - if lsa in show_ospf_json['areas'][ospf_area][ - 'Summary Link States']: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Summary " - "LSA %s", router, ospf_area, lsa) - result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + ) + return errormsg + if "Summary Link States" in area_lsa: + for lsa in area_lsa["Summary Link States"]: + if ( + lsa + in show_ospf_json["areas"][ospf_area]["Summary Link States"] + ): + logger.info( + "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s", + router, + ospf_area, + lsa, + ) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Summary LSA is {}".format(router, ospf_area, lsa) - return errormsg - if 'ASBR-Summary Link States' in area_lsa: - for lsa in area_lsa['ASBR-Summary Link States']: - if lsa in show_ospf_json['areas'][ospf_area][ - 'ASBR-Summary Link States']: - logger.info( - "[DUT: %s] OSPF LSDB area %s:ASBR Summary " - "LSA %s", router, ospf_area, lsa) - result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ - " ASBR Summary LSA is {}".format( - router, ospf_area, lsa) - return errormsg + ) + return errormsg + if "ASBR-Summary Link States" in area_lsa: + for lsa in area_lsa["ASBR-Summary Link States"]: + if ( + lsa + in show_ospf_json["areas"][ospf_area][ + "ASBR-Summary Link States" + ] + ): + logger.info( + "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s", + router, + ospf_area, + lsa, + ) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" + " ASBR Summary LSA is {}".format(router, ospf_area, lsa) + ) + return errormsg if ospf_external_lsa: - for ospf_ext_lsa, ext_lsa_data in ospf_external_lsa.items(): - if ospf_ext_lsa in show_ospf_json['AS External Link States']: - 
logger.info( - "[DUT: %s] OSPF LSDB:External LSA %s", - router, ospf_ext_lsa) - result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB : expected" \ - " External LSA is {}".format(router, ospf_ext_lsa) - return errormsg + for ospf_ext_lsa, ext_lsa_data in ospf_external_lsa.items(): + if ospf_ext_lsa in show_ospf_json["AS External Link States"]: + logger.info( + "[DUT: %s] OSPF LSDB:External LSA %s", router, ospf_ext_lsa + ) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB : expected" + " External LSA is {}".format(router, ospf_ext_lsa) + ) + return errormsg logger.debug("Exiting API: verify_ospf_database()") return result - @retry(attempts=10, wait=2, return_is_str=True) def verify_ospf_summary(tgen, topo, dut, input_dict): """ @@ -1146,14 +1211,12 @@ def verify_ospf_summary(tgen, topo, dut, input_dict): logger.info("Verifying OSPF summary on router %s:", router) - if 'ospf' not in topo['routers'][dut]: - errormsg = "[DUT: {}] OSPF is not configured on the router.".format( - router) + if "ospf" not in topo["routers"][dut]: + errormsg = "[DUT: {}] OSPF is not configured on the router.".format(router) return errormsg rnode = tgen.routers()[dut] - show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json", - isjson=True) + show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): @@ -1165,17 +1228,25 @@ def verify_ospf_summary(tgen, topo, dut, input_dict): for ospf_summ, summ_data in ospf_summary_data.items(): if ospf_summ not in show_ospf_json: continue - summary = ospf_summary_data[ospf_summ]['Summary address'] + summary = ospf_summary_data[ospf_summ]["Summary address"] if summary in show_ospf_json: for summ in summ_data: if summ_data[summ] == show_ospf_json[summary][summ]: - logger.info("[DUT: %s] OSPF summary %s:%s is %s", - router, summary, summ, summ_data[summ]) + logger.info( + "[DUT: %s] OSPF summary %s:%s is %s", + router, + summary, + summ, + summ_data[summ], + ) result = True else: - errormsg = ("[DUT: {}] OSPF summary {}:{} is %s, " - "Expected is {}".format(router,summary, summ, - show_ospf_json[summary][summ])) + errormsg = ( + "[DUT: {}] OSPF summary {}:{} is %s, " + "Expected is {}".format( + router, summary, summ, show_ospf_json[summary][summ] + ) + ) return errormsg logger.debug("Exiting API: verify_ospf_summary()") diff --git a/tests/topotests/lib/test/test_json.py b/tests/topotests/lib/test/test_json.py index 7a061a9bc6..b85e193d3b 100755 --- a/tests/topotests/lib/test/test_json.py +++ b/tests/topotests/lib/test/test_json.py @@ -296,7 +296,7 @@ def test_json_list_ordered(): ] dsub1 = [ - '__ordered__', + "__ordered__", "some string", {"id": 1, "value": "abc"}, 123, @@ -312,28 +312,28 @@ def test_json_list_exact_matching(): {"id": 1, "value": "abc"}, "some string", 123, - [1,2,3], + [1, 2, 3], ] dsub1 = [ "some string", {"id": 1, "value": "abc"}, 123, - [1,2,3], + [1, 2, 3], ] dsub2 = [ {"id": 1}, "some string", 123, - [1,2,3], + [1, 2, 3], ] dsub3 = [ {"id": 1, "value": "abc"}, "some string", 123, - [1,3,2], + [1, 3, 2], ] assert json_cmp(dcomplete, dsub1, exact=True) is not None @@ -344,30 +344,30 @@ def test_json_object_exact_matching(): "Test JSON object on exact matching using the 'exact' parameter." 
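    # Sketch note: with exact=True, json_cmp should also flag data present in
    # dcomplete that the expected dict omits; dsub1 drops key "b", dsub2 drops
    # "value" inside "a", and dsub3 shortens list "d", so each comparison below
    # is asserted to return an error rather than None.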
dcomplete = { - 'a': {"id": 1, "value": "abc"}, - 'b': "some string", - 'c': 123, - 'd': [1,2,3], + "a": {"id": 1, "value": "abc"}, + "b": "some string", + "c": 123, + "d": [1, 2, 3], } dsub1 = { - 'a': {"id": 1, "value": "abc"}, - 'c': 123, - 'd': [1,2,3], + "a": {"id": 1, "value": "abc"}, + "c": 123, + "d": [1, 2, 3], } dsub2 = { - 'a': {"id": 1}, - 'b': "some string", - 'c': 123, - 'd': [1,2,3], + "a": {"id": 1}, + "b": "some string", + "c": 123, + "d": [1, 2, 3], } dsub3 = { - 'a': {"id": 1, "value": "abc"}, - 'b': "some string", - 'c': 123, - 'd': [1,3], + "a": {"id": 1, "value": "abc"}, + "b": "some string", + "c": 123, + "d": [1, 3], } assert json_cmp(dcomplete, dsub1, exact=True) is not None @@ -382,35 +382,35 @@ def test_json_list_asterisk_matching(): {"id": 1, "value": "abc"}, "some string", 123, - [1,2,3], + [1, 2, 3], ] dsub1 = [ - '*', + "*", "some string", 123, - [1,2,3], + [1, 2, 3], ] dsub2 = [ - {"id": '*', "value": "abc"}, + {"id": "*", "value": "abc"}, "some string", 123, - [1,2,3], + [1, 2, 3], ] dsub3 = [ {"id": 1, "value": "abc"}, "some string", 123, - [1,'*',3], + [1, "*", 3], ] dsub4 = [ - '*', + "*", "some string", - '*', - [1,2,3], + "*", + [1, 2, 3], ] assert json_cmp(dcomplete, dsub1) is None @@ -423,38 +423,38 @@ def test_json_object_asterisk_matching(): "Test JSON object value elements on matching '*' as a placeholder for arbitrary data." dcomplete = { - 'a': {"id": 1, "value": "abc"}, - 'b': "some string", - 'c': 123, - 'd': [1,2,3], + "a": {"id": 1, "value": "abc"}, + "b": "some string", + "c": 123, + "d": [1, 2, 3], } dsub1 = { - 'a': '*', - 'b': "some string", - 'c': 123, - 'd': [1,2,3], + "a": "*", + "b": "some string", + "c": 123, + "d": [1, 2, 3], } dsub2 = { - 'a': {"id": 1, "value": "abc"}, - 'b': "some string", - 'c': 123, - 'd': [1,'*',3], + "a": {"id": 1, "value": "abc"}, + "b": "some string", + "c": 123, + "d": [1, "*", 3], } dsub3 = { - 'a': {"id": '*', "value": "abc"}, - 'b': "some string", - 'c': 123, - 'd': [1,2,3], + "a": {"id": "*", "value": "abc"}, + "b": "some string", + "c": 123, + "d": [1, 2, 3], } dsub4 = { - 'a': '*', - 'b': "some string", - 'c': '*', - 'd': [1,2,3], + "a": "*", + "b": "some string", + "c": "*", + "d": [1, 2, 3], } assert json_cmp(dcomplete, dsub1) is None @@ -465,37 +465,12 @@ def test_json_object_asterisk_matching(): def test_json_list_nested_with_objects(): - dcomplete = [ - { - "key": 1, - "list": [ - 123 - ] - }, - { - "key": 2, - "list": [ - 123 - ] - } - ] + dcomplete = [{"key": 1, "list": [123]}, {"key": 2, "list": [123]}] - dsub1 = [ - { - "key": 2, - "list": [ - 123 - ] - }, - { - "key": 1, - "list": [ - 123 - ] - } - ] + dsub1 = [{"key": 2, "list": [123]}, {"key": 1, "list": [123]}] assert json_cmp(dcomplete, dsub1) is None + if __name__ == "__main__": sys.exit(pytest.main()) diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index ffdcb683e7..3e92bd7565 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -703,11 +703,9 @@ class TopoRouter(TopoGear): Stop router, private internal version * Kill daemons """ - self.logger.debug("stopping: wait {}, assert {}".format( - wait, assertOnError)) + self.logger.debug("stopping: wait {}, assert {}".format(wait, assertOnError)) return self.tgen.net[self.name].stopRouter(wait, assertOnError) - def stop(self): """ Stop router cleanly: @@ -724,7 +722,7 @@ class TopoRouter(TopoGear): * Start daemons (e.g. 
FRR) * Configure daemon logging files """ - self.logger.debug('starting') + self.logger.debug("starting") nrouter = self.tgen.net[self.name] result = nrouter.startRouterDaemons(daemons) @@ -734,10 +732,12 @@ class TopoRouter(TopoGear): for d in daemons: if enabled == 0: continue - self.vtysh_cmd('configure terminal\nlog commands\nlog file {}.log'.\ - format(daemon), daemon=daemon) + self.vtysh_cmd( + "configure terminal\nlog commands\nlog file {}.log".format(daemon), + daemon=daemon, + ) - if result != '': + if result != "": self.tgen.set_error(result) return result @@ -747,7 +747,7 @@ class TopoRouter(TopoGear): Kill specific daemon(user defined daemon only) forcefully using SIGKILL """ - self.logger.debug('Killing daemons using SIGKILL..') + self.logger.debug("Killing daemons using SIGKILL..") return self.tgen.net[self.name].killRouterDaemons(daemons, wait, assertOnError) def vtysh_cmd(self, command, isjson=False, daemon=None): @@ -1070,7 +1070,7 @@ def diagnose_env_linux(): "isisd", "pimd", "ldpd", - "pbrd" + "pbrd", ]: path = os.path.join(frrdir, fname) if not os.path.isfile(path): diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index 6535918e36..f2fafa5e2a 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -45,6 +45,7 @@ from lib.common_config import ( from lib.bgp import create_router_bgp from lib.ospf import create_router_ospf + ROUTER_LIST = [] @@ -214,13 +215,14 @@ def build_topo_from_json(tgen, topo): while listSwitches != []: curSwitch = listSwitches.pop(0) # Physical Interfaces - if "links" in topo['switches'][curSwitch]: + if "links" in topo["switches"][curSwitch]: for destRouterLink, data in sorted( - topo['switches'][curSwitch]['links'].items()): + topo["switches"][curSwitch]["links"].items() + ): # Loopback interfaces if "dst_node" in data: - destRouter = data['dst_node'] + destRouter = data["dst_node"] elif "-" in destRouterLink: # Splitting and storing destRouterLink data in tempList @@ -232,39 +234,55 @@ def build_topo_from_json(tgen, topo): if destRouter in listAllRouters: - topo['routers'][destRouter]['links'][curSwitch] = \ - deepcopy(topo['switches'][curSwitch]['links'][destRouterLink]) + topo["routers"][destRouter]["links"][curSwitch] = deepcopy( + topo["switches"][curSwitch]["links"][destRouterLink] + ) # Assigning name to interfaces - topo['routers'][destRouter]['links'][curSwitch]['interface'] = \ - '{}-{}-eth{}'.format(destRouter, curSwitch, topo['routers'] \ - [destRouter]['nextIfname']) + topo["routers"][destRouter]["links"][curSwitch][ + "interface" + ] = "{}-{}-eth{}".format( + destRouter, curSwitch, topo["routers"][destRouter]["nextIfname"] + ) - topo['switches'][curSwitch]['links'][destRouter]['interface'] = \ - '{}-{}-eth{}'.format(curSwitch, destRouter, topo['routers'] \ - [destRouter]['nextIfname']) + topo["switches"][curSwitch]["links"][destRouter][ + "interface" + ] = "{}-{}-eth{}".format( + curSwitch, destRouter, topo["routers"][destRouter]["nextIfname"] + ) - topo['routers'][destRouter]['nextIfname'] += 1 + topo["routers"][destRouter]["nextIfname"] += 1 # Add links - dictSwitches[curSwitch].add_link(tgen.gears[destRouter], \ - topo['switches'][curSwitch]['links'][destRouter]['interface'], - topo['routers'][destRouter]['links'][curSwitch]['interface'], - ) + dictSwitches[curSwitch].add_link( + tgen.gears[destRouter], + topo["switches"][curSwitch]["links"][destRouter]["interface"], + topo["routers"][destRouter]["links"][curSwitch]["interface"], + ) # IPv4 - if 'ipv4' in
topo['routers'][destRouter]['links'][curSwitch]: - if topo['routers'][destRouter]['links'][curSwitch]['ipv4'] == 'auto': - topo['routers'][destRouter]['links'][curSwitch]['ipv4'] = \ - '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \ - 'v4mask']) + if "ipv4" in topo["routers"][destRouter]["links"][curSwitch]: + if ( + topo["routers"][destRouter]["links"][curSwitch]["ipv4"] + == "auto" + ): + topo["routers"][destRouter]["links"][curSwitch][ + "ipv4" + ] = "{}/{}".format( + ipv4Next, topo["link_ip_start"]["v4mask"] + ) ipv4Next += 1 # IPv6 - if 'ipv6' in topo['routers'][destRouter]['links'][curSwitch]: - if topo['routers'][destRouter]['links'][curSwitch]['ipv6'] == 'auto': - topo['routers'][destRouter]['links'][curSwitch]['ipv6'] = \ - '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \ - 'v6mask']) + if "ipv6" in topo["routers"][destRouter]["links"][curSwitch]: + if ( + topo["routers"][destRouter]["links"][curSwitch]["ipv6"] + == "auto" + ): + topo["routers"][destRouter]["links"][curSwitch][ + "ipv6" + ] = "{}/{}".format( + ipv6Next, topo["link_ip_start"]["v6mask"] + ) ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step) logger.debug( @@ -294,7 +312,7 @@ def build_config_from_json(tgen, topo, save_bkup=True): ("bgp_community_list", create_bgp_community_lists), ("route_maps", create_route_maps), ("bgp", create_router_bgp), - ("ospf", create_router_ospf) + ("ospf", create_router_ospf), ] ) diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index a187971e41..4b18862101 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -51,8 +51,9 @@ from mininet.log import setLogLevel, info from mininet.cli import CLI from mininet.link import Intf + def gdb_core(obj, daemon, corefiles): - gdbcmds = ''' + gdbcmds = """ info threads bt full disassemble @@ -66,21 +67,21 @@ def gdb_core(obj, daemon, corefiles): disassemble up disassemble - ''' - gdbcmds = [['-ex', i.strip()] for i in gdbcmds.strip().split('\n')] + """ + gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")] gdbcmds = [item for sl in gdbcmds for item in sl] daemon_path = os.path.join(obj.daemondir, daemon) backtrace = subprocess.check_output( - ['gdb', daemon_path, corefiles[0], '--batch'] + gdbcmds + ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds ) sys.stderr.write( - "\n%s: %s crashed. Core file found - Backtrace follows:\n" - % (obj.name, daemon) + "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon) ) sys.stderr.write("%s" % backtrace) return backtrace + class json_cmp_result(object): "json_cmp result class for better assertion messages" @@ -739,7 +740,8 @@ def ip4_vrf_route(node): } """ output = normalize_text( - node.run("ip route show vrf {0}-cust1".format(node.name))).splitlines() + node.run("ip route show vrf {0}-cust1".format(node.name)) + ).splitlines() result = {} for line in output: @@ -821,7 +823,8 @@ def ip6_vrf_route(node): } """ output = normalize_text( - node.run("ip -6 route show vrf {0}-cust1".format(node.name))).splitlines() + node.run("ip -6 route show vrf {0}-cust1".format(node.name)) + ).splitlines() result = {} for line in output: columns = line.split(" ") @@ -992,7 +995,7 @@ class Router(Node): # Backward compatibility: # Load configuration defaults like topogen. 
self.config_defaults = configparser.ConfigParser( - defaults = { + defaults={ "verbosity": "info", "frrdir": "/usr/lib/frr", "routertype": "frr", @@ -1095,7 +1098,7 @@ class Router(Node): if re.search(r"No such file or directory", rundaemons): return 0 if rundaemons is not None: - bet = rundaemons.split('\n') + bet = rundaemons.split("\n") for d in bet[:-1]: daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() if daemonpid.isdigit() and pid_exists(int(daemonpid)): @@ -1110,24 +1113,28 @@ class Router(Node): if re.search(r"No such file or directory", rundaemons): return errors if rundaemons is not None: - dmns = rundaemons.split('\n') + dmns = rundaemons.split("\n") # Exclude empty string at end of list for d in dmns[:-1]: daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() if daemonpid.isdigit() and pid_exists(int(daemonpid)): daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0]) - logger.info( - "{}: stopping {}".format( - self.name, daemonname - ) - ) + logger.info("{}: stopping {}".format(self.name, daemonname)) try: os.kill(int(daemonpid), signal.SIGTERM) except OSError as err: if err.errno == errno.ESRCH: - logger.error("{}: {} left a dead pidfile (pid={})".format(self.name, daemonname, daemonpid)) + logger.error( + "{}: {} left a dead pidfile (pid={})".format( + self.name, daemonname, daemonpid + ) + ) else: - logger.info("{}: {} could not kill pid {}: {}".format(self.name, daemonname, daemonpid, str(err))) + logger.info( + "{}: {} could not kill pid {}: {}".format( + self.name, daemonname, daemonpid, str(err) + ) + ) if not wait: return errors @@ -1135,18 +1142,28 @@ class Router(Node): running = self.listDaemons() if running: - sleep(0.1, "{}: waiting for daemons stopping: {}".format(self.name, ', '.join(running))) + sleep( + 0.1, + "{}: waiting for daemons stopping: {}".format( + self.name, ", ".join(running) + ), + ) running = self.listDaemons() counter = 20 while counter > 0 and running: - sleep(0.5, "{}: waiting for daemons stopping: {}".format(self.name, ', '.join(running))) + sleep( + 0.5, + "{}: waiting for daemons stopping: {}".format( + self.name, ", ".join(running) + ), + ) running = self.listDaemons() counter -= 1 if running: # 2nd round of kill if daemons didn't exit - dmns = rundaemons.split('\n') + dmns = rundaemons.split("\n") # Exclude empty string at end of list for d in dmns[:-1]: daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() @@ -1295,11 +1312,12 @@ class Router(Node): def startRouterDaemons(self, daemons=None): "Starts all FRR daemons for this router." 
- bundle_data = '' + bundle_data = "" - if os.path.exists('/etc/frr/support_bundle_commands.conf'): + if os.path.exists("/etc/frr/support_bundle_commands.conf"): bundle_data = subprocess.check_output( - ["cat /etc/frr/support_bundle_commands.conf"], shell=True) + ["cat /etc/frr/support_bundle_commands.conf"], shell=True + ) self.cmd( "echo '{}' > /etc/frr/support_bundle_commands.conf".format(bundle_data) ) @@ -1400,7 +1418,7 @@ class Router(Node): for daemon in daemons: if rundaemons is not None and daemon in rundaemons: numRunning = 0 - dmns = rundaemons.split('\n') + dmns = rundaemons.split("\n") # Exclude empty string at end of list for d in dmns[:-1]: if re.search(r"%s" % daemon, d): @@ -1738,8 +1756,9 @@ class LegacySwitch(OVSSwitch): OVSSwitch.__init__(self, name, failMode="standalone", **params) self.switchIP = None + def frr_unicode(s): - '''Convert string to unicode, depending on python version''' + """Convert string to unicode, depending on python version""" if sys.version_info[0] > 2: return s else: diff --git a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py index 86fc90e665..53322f432f 100644 --- a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py +++ b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py @@ -65,22 +65,22 @@ class OspfSrTopo(Topo): tgen.add_router("r{}".format(routern)) # Interconnect router 1 and 2 with 2 links - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) # Interconnect router 3 and 2 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r3']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r2"]) # Interconnect router 4 and 2 - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['r4']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r4"]) + switch.add_link(tgen.gears["r2"]) def setup_module(mod): @@ -134,12 +134,13 @@ def test_ospf_sr(): # Run test function until we get a result. Wait at most 60 seconds. rt = tgen.gears[router] test_func = partial( - topotest.router_json_cmp, rt, 'show ip ospf database segment-routing json', expected + topotest.router_json_cmp, + rt, + "show ip ospf database segment-routing json", + expected, ) rv, diff = topotest.run_and_expect(test_func, None, count=25, wait=3) - assert rv, "OSPF did not start Segment Routing on {}:\n{}".format( - router, diff - ) + assert rv, "OSPF did not start Segment Routing on {}:\n{}".format(router, diff) def test_ospf_kernel_route(): @@ -169,7 +170,7 @@ def test_ospf_kernel_route(): } ] """ - out = rt.vtysh_cmd('show mpls table json', isjson=True) + out = rt.vtysh_cmd("show mpls table json", isjson=True) outlist = [] for key in out.keys(): diff --git a/tests/topotests/ospf-topo2/test_ospf_topo2.py b/tests/topotests/ospf-topo2/test_ospf_topo2.py index 79e8e6bf58..6451f5fb32 100644 --- a/tests/topotests/ospf-topo2/test_ospf_topo2.py +++ b/tests/topotests/ospf-topo2/test_ospf_topo2.py @@ -35,7 +35,7 @@ import json # Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -46,28 +46,30 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo + class OSPFTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) # Create 2 routers for routern in range(1, 3): - tgen.add_router('r{}'.format(routern)) + tgen.add_router("r{}".format(routern)) # Create an empty network for router 1 - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['r1']) + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) # Create an empty network for router 2 - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) # Interconnect router 1, 2 - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['r1']) - switch.add_link(tgen.gears['r2']) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) def setup_module(mod): @@ -78,12 +80,10 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_OSPF, - os.path.join(CWD, '{}/ospfd.conf'.format(rname)) + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) ) # What is this? OSPF Unnumbered depends on the rp_filter # the rp_filter. Setting it to '0' allows the OS to pass # up the mcast packet not destined for the local routers # network. - topotest.set_sysctl(tgen.net['r1'], - 'net.ipv4.conf.r1-eth1.rp_filter', 0) - topotest.set_sysctl(tgen.net['r1'], - 'net.ipv4.conf.all.rp_filter', 0) - topotest.set_sysctl(tgen.net['r2'], - 'net.ipv4.conf.r2-eth1.rp_filter', 0) - topotest.set_sysctl(tgen.net['r2'], - 'net.ipv4.conf.all.rp_filter', 0) + topotest.set_sysctl(tgen.net["r1"], "net.ipv4.conf.r1-eth1.rp_filter", 0) + topotest.set_sysctl(tgen.net["r1"], "net.ipv4.conf.all.rp_filter", 0) + topotest.set_sysctl(tgen.net["r2"], "net.ipv4.conf.r2-eth1.rp_filter", 0) + topotest.set_sysctl(tgen.net["r2"], "net.ipv4.conf.all.rp_filter", 0) # Initialize all routers.
tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(mod): "Teardown the pytest environment" @@ -116,50 +113,54 @@ def test_ospf_convergence(): "Test OSPF daemon convergence and that we have received the ospf routes" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") for router, rnode in tgen.routers().items(): logger.info('Waiting for router "%s" convergence', router) - json_file = '{}/{}/ospf-route.json'.format(CWD, router) + json_file = "{}/{}/ospf-route.json".format(CWD, router) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - rnode, 'show ip ospf route json', expected) + test_func = partial( + topotest.router_json_cmp, rnode, "show ip ospf route json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() + def test_ospf_kernel_route(): "Test OSPF kernel route installation and we have the onlink success" tgen = get_topogen() if tgen.routers_have_failure(): - pytest.skip('skipped because of router(s) failure') + pytest.skip("skipped because of router(s) failure") rlist = tgen.routers().values() for router in rlist: logger.info('Checking OSPF IPv4 kernel routes in "%s"', router.name) - json_file = '{}/{}/v4_route.json'.format(CWD, router.name) + json_file = "{}/{}/v4_route.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, - router, 'show ip route json', expected) - _, result = topotest.run_and_expect(test_func, None, count=10, wait=.5) + test_func = partial( + topotest.router_json_cmp, router, "show ip route json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) assertmsg = '"{}" JSON output mismatches'.format(router) assert result is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() def test_memory_leak(): "Run the memory leak test and report results."
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py index a2f9c03ab4..e92baefabf 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py @@ -48,7 +48,7 @@ from lib.common_config import ( reset_config_on_routers, step, shutdown_bringup_interface, - topo_daemons + topo_daemons, ) from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py index 399fa02230..3b37b8a92f 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py @@ -53,7 +53,7 @@ from lib.common_config import ( create_route_maps, shutdown_bringup_interface, create_interfaces_cfg, - topo_daemons + topo_daemons, ) from lib.topolog import logger diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py index 17a3676e2e..967bc44879 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py @@ -53,7 +53,7 @@ from lib.common_config import ( shutdown_bringup_interface, stop_router, start_router, - topo_daemons + topo_daemons, ) from lib.bgp import verify_bgp_convergence, create_router_bgp from lib.topolog import logger diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py index f261104206..1357a86c81 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py @@ -55,7 +55,7 @@ from lib.common_config import ( shutdown_bringup_interface, stop_router, start_router, - topo_daemons + topo_daemons, ) from lib.bgp import verify_bgp_convergence, create_router_bgp from lib.topolog import logger diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py index ff4399f19e..82a34d046c 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py @@ -44,7 +44,7 @@ from lib.common_config import ( create_route_maps, shutdown_bringup_interface, create_interfaces_cfg, - topo_daemons + topo_daemons, ) from ipaddress import IPv4Address from lib.topogen import Topogen, get_topogen diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py index 6ebc74a013..64edc1ebbf 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py @@ -52,7 +52,7 @@ from lib.common_config import ( step, create_route_maps, verify_prefix_lists, - topo_daemons + topo_daemons, ) from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json diff --git 
a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py index 2c6bcf0162..6ac0b515df 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py @@ -50,7 +50,7 @@ from lib.common_config import ( create_static_routes, step, shutdown_bringup_interface, - topo_daemons + topo_daemons, ) from lib.bgp import verify_bgp_convergence, create_router_bgp from lib.topolog import logger @@ -278,8 +278,7 @@ def test_ospf_redistribution_tc5_p0(request): dut = "r1" for num in range(0, nretry): - result = verify_ospf_rib( - tgen, dut, input_dict, next_hop=nh, expected=False) + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False) if result is not True: break @@ -399,8 +398,7 @@ def test_ospf_redistribution_tc6_p0(request): dut = "r1" for num in range(0, nretry): - result = verify_ospf_rib( - tgen, dut, input_dict, next_hop=nh, expected=False) + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False) if result is not True: break assert result is not True, "Testcase {} : Failed \n Error: {}".format( @@ -409,13 +407,7 @@ def test_ospf_redistribution_tc6_p0(request): protocol = "ospf" result = verify_rib( - tgen, - "ipv4", - dut, - input_dict, - protocol=protocol, - next_hop=nh, - expected=False, + tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh, expected=False, ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( tc_name, result diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py index 5a141224f1..f563637b3c 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py @@ -53,7 +53,7 @@ from lib.common_config import ( create_route_maps, shutdown_bringup_interface, create_interfaces_cfg, - topo_daemons + topo_daemons, ) from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json diff --git a/tests/topotests/pbr-topo1/test_pbr_topo1.py b/tests/topotests/pbr-topo1/test_pbr_topo1.py index 91979a8f04..fcbe3c0adf 100644 --- a/tests/topotests/pbr-topo1/test_pbr_topo1.py +++ b/tests/topotests/pbr-topo1/test_pbr_topo1.py @@ -147,7 +147,9 @@ def test_pbr_data(): expected = json.loads(open(intf_file).read()) # Actual output from router - test_func = partial(topotest.router_json_cmp, router, "show pbr interface json", expected) + test_func = partial( + topotest.router_json_cmp, router, "show pbr interface json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assertmsg = '"show pbr interface" mismatches on {}'.format(router.name) if result is not None: @@ -161,7 +163,9 @@ def test_pbr_data(): expected = json.loads(open(map_file).read()) # Actual output from router - test_func = partial(topotest.router_json_cmp, router, "show pbr map json", expected) + test_func = partial( + topotest.router_json_cmp, router, "show pbr map json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assertmsg = '"show pbr map" mismatches on {}'.format(router.name) if result is not None: @@ -175,13 +179,16 @@ def test_pbr_data(): expected = json.loads(open(nexthop_file).read()) # Actual output from router - test_func = partial(topotest.router_json_cmp, router, "show pbr nexthop-groups json", expected) + test_func = 
partial( + topotest.router_json_cmp, router, "show pbr nexthop-groups json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assertmsg = '"show pbr nexthop-groups" mismatches on {}'.format(router.name) if result is not None: gather_pbr_data_on_error(router) assert result is None, assertmsg + def test_pbr_flap(): "Test PBR interface flapping" @@ -212,7 +219,9 @@ def test_pbr_flap(): expected = json.loads(open(intf_file).read()) # Actual output from router - test_func = partial(topotest.router_json_cmp, router, "show pbr interface json", expected) + test_func = partial( + topotest.router_json_cmp, router, "show pbr interface json", expected + ) _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assertmsg = '"show pbr interface" mismatches on {}'.format(router.name) if result is not None: @@ -274,4 +283,3 @@ def gather_pbr_data_on_error(router): logger.info(router.run("ip route show table 10005")) logger.info(router.run("ip -6 route show table 10005")) logger.info(router.run("ip rule show")) - diff --git a/tests/topotests/pim-basic/mcast-rx.py b/tests/topotests/pim-basic/mcast-rx.py index 7aa4d4027e..862ad46af4 100755 --- a/tests/topotests/pim-basic/mcast-rx.py +++ b/tests/topotests/pim-basic/mcast-rx.py @@ -35,8 +35,9 @@ import time def ifname_to_ifindex(ifname): - output = subprocess.check_output("ip link show %s" % ifname, - shell=True, universal_newlines=True) + output = subprocess.check_output( + "ip link show %s" % ifname, shell=True, universal_newlines=True + ) first_line = output.split("\n")[0] re_index = re.search("^(\d+):", first_line) diff --git a/tests/topotests/pim-basic/mcast-tx.py b/tests/topotests/pim-basic/mcast-tx.py index 7fb980c647..87038ad5cf 100755 --- a/tests/topotests/pim-basic/mcast-tx.py +++ b/tests/topotests/pim-basic/mcast-tx.py @@ -39,9 +39,7 @@ logging.addLevelName( ) log = logging.getLogger(__name__) -parser = argparse.ArgumentParser( - description="Multicast packet generator" -) +parser = argparse.ArgumentParser(description="Multicast packet generator") parser.add_argument("group", help="Multicast IP") parser.add_argument("ifname", help="Interface name") parser.add_argument("--port", type=int, help="UDP port number", default=1000) @@ -62,8 +60,9 @@ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # if sys.version_info[0] > 2: sock.setsockopt( - socket.SOL_SOCKET, 25, struct.pack("%ds" % len(args.ifname), - args.ifname.encode('utf-8')) + socket.SOL_SOCKET, + 25, + struct.pack("%ds" % len(args.ifname), args.ifname.encode("utf-8")), ) else: sock.setsockopt( diff --git a/tests/topotests/route-scale/test_route_scale.py b/tests/topotests/route-scale/test_route_scale.py index 0bfae3b830..8aedfc198c 100644 --- a/tests/topotests/route-scale/test_route_scale.py +++ b/tests/topotests/route-scale/test_route_scale.py @@ -95,7 +95,8 @@ def setup_module(module): ) tgen.start_router() - #tgen.mininet_cli() + # tgen.mininet_cli() + def teardown_module(_mod): "Teardown the pytest environment" @@ -104,6 +105,7 @@ def teardown_module(_mod): # This function tears down the whole topology. 
tgen.stop_topology() + def test_converge_protocols(): "Wait for protocol convergence" @@ -112,37 +114,45 @@ def test_converge_protocols(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) + def run_one_setup(r1, s): "Run one ecmp config" # Extract params - expected_installed = s['expect_in'] - expected_removed = s['expect_rem'] + expected_installed = s["expect_in"] + expected_removed = s["expect_rem"] - count = s['count'] - wait = s['wait'] + count = s["count"] + wait = s["wait"] - logger.info("Testing 1 million routes X {} ecmp".format(s['ecmp'])) + logger.info("Testing 1 million routes X {} ecmp".format(s["ecmp"])) - r1.vtysh_cmd("sharp install route 1.0.0.0 \ - nexthop-group {} 1000000".format(s['nhg']), - isjson=False) + r1.vtysh_cmd( + "sharp install route 1.0.0.0 \ + nexthop-group {} 1000000".format( + s["nhg"] + ), + isjson=False, + ) - test_func = partial(topotest.router_json_cmp, r1, "show ip route summary json", expected_installed) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route summary json", expected_installed + ) success, result = topotest.run_and_expect(test_func, None, count, wait) assert success, "Route scale test install failed:\n{}".format(result) output = r1.vtysh_cmd("sharp data route", isjson=False) - logger.info("1 million routes X {} ecmp installed".format(s['ecmp'])) + logger.info("1 million routes X {} ecmp installed".format(s["ecmp"])) logger.info(output) r1.vtysh_cmd("sharp remove route 1.0.0.0 1000000", isjson=False) - test_func = partial(topotest.router_json_cmp, r1, "show ip route summary json", expected_removed) + test_func = partial( + topotest.router_json_cmp, r1, "show ip route summary json", expected_removed + ) success, result = topotest.run_and_expect(test_func, None, count, wait) assert success, "Route scale test remove failed:\n{}".format(result) output = r1.vtysh_cmd("sharp data route", isjson=False) - logger.info("1 million routes x {} ecmp removed".format( - s['ecmp'])) + logger.info("1 million routes x {} ecmp removed".format(s["ecmp"])) logger.info(output) @@ -164,19 +174,23 @@ def test_route_install(): # dict keys of params: ecmp number, corresponding nhg name, timeout, # number of times to wait - scale_keys = ['ecmp', 'nhg', 'wait', 'count', 'expect_in', 'expect_rem'] + scale_keys = ["ecmp", "nhg", "wait", "count", "expect_in", "expect_rem"] # Table of defaults, used for timeout values and 'expected' objects - scale_defaults = dict(zip(scale_keys, [None, None, 7, 30, - expected_installed, - expected_removed])) + scale_defaults = dict( + zip(scale_keys, [None, None, 7, 30, expected_installed, expected_removed]) + ) # List of params for each step in the test; note extra time given # for the highest ecmp steps. Executing 'show' at scale can be costly # so we widen the interval there too. 
scale_steps = [ - [1, 'one'], [2, 'two'], [4, 'four'], - [8, 'eight'], [16, 'sixteen', 10, 40], [32, 'thirtytwo', 10, 40] + [1, "one"], + [2, "two"], + [4, "four"], + [8, "eight"], + [16, "sixteen", 10, 40], + [32, "thirtytwo", 10, 40], ] # Build up a list of dicts with params for each step of the test; @@ -191,17 +205,18 @@ def test_route_install(): scale_setups.append(d) # Avoid top ecmp case for runs with < 4G memory - p = os.popen('free') + p = os.popen("free") l = p.readlines()[1].split() mem = int(l[1]) if mem < 4000000: - logger.info('Limited memory available: {}, skipping x32 testcase'.format(mem)) + logger.info("Limited memory available: {}, skipping x32 testcase".format(mem)) scale_setups = scale_setups[0:-1] # Run each step using the dicts we've built for s in scale_setups: run_one_setup(r1, s) + # Mem leak testcase def test_memory_leak(): "Run the memory leak test and report results." @@ -210,6 +225,7 @@ def test_memory_leak(): pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tools/fixup-deprecated.py b/tools/fixup-deprecated.py index 38958480a8..57f9df9065 100755 --- a/tools/fixup-deprecated.py +++ b/tools/fixup-deprecated.py @@ -8,48 +8,74 @@ import sys, re, subprocess, os + class replaceEntry: - compiled = None #compiled regex - repl = None #regex + compiled = None # compiled regex + repl = None # regex + def __init__(self, c, r): self.compiled = c self.repl = r + rList = [ # old #define VNL, VTYNL, VTY_NEWLINE - replaceEntry(re.compile(r'(VNL|VTYNL|VTY_NEWLINE)'), - r'"\\n"'), + replaceEntry(re.compile(r"(VNL|VTYNL|VTY_NEWLINE)"), r'"\\n"'), # old #define VTY_GET_INTEGER(desc, v, str) # old #define VTY_GET_INTEGER_RANGE(desc, v, str, min, max) # old #define VTY_GET_ULONG(desc, v, str) - replaceEntry(re.compile(r'(VTY_GET_INTEGER(_RANGE|)|VTY_GET_ULONG)[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;', re.M | re.S), - r'(\4) = strtoul((\5), NULL, 10);\t/* \3 */'), + replaceEntry( + re.compile( + r"(VTY_GET_INTEGER(_RANGE|)|VTY_GET_ULONG)[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;", + re.M | re.S, + ), + r"(\4) = strtoul((\5), NULL, 10);\t/* \3 */", + ), # old #define VTY_GET_ULL(desc, v, str) - replaceEntry(re.compile(r'VTY_GET_ULL[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;', re.M | re.S), - r'(\2) = strtoull((\3), NULL, 10);\t/* \1 */'), + replaceEntry( + re.compile( + r"VTY_GET_ULL[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;", + re.M | re.S, + ), + r"(\2) = strtoull((\3), NULL, 10);\t/* \1 */", + ), # old #define VTY_GET_IPV4_ADDRESS(desc, v, str) - replaceEntry(re.compile(r'VTY_GET_IPV4_ADDRESS[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;', re.M | re.S), - r'inet_aton((\3), &(\2));\t/* \1 */'), + replaceEntry( + re.compile( + r"VTY_GET_IPV4_ADDRESS[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;", + re.M | re.S, + ), + r"inet_aton((\3), &(\2));\t/* \1 */", + ), # old #define VTY_GET_IPV4_PREFIX(desc, v, str) - replaceEntry(re.compile(r'VTY_GET_IPV4_PREFIX[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;', re.M | re.S), - r'str2prefix_ipv4((\3), &(\2));\t/* \1 */'), + replaceEntry( + re.compile( + r"VTY_GET_IPV4_PREFIX[\s\(]*(.*?)\s*,\s*(.*?)\s*,\s*(.*?)(\s*|)(\)|,).*?;", + re.M | re.S, + ), + r"str2prefix_ipv4((\3), &(\2));\t/* \1 */", + ), # old #define vty_outln(vty, str, ...) 
- replaceEntry(re.compile(r'vty_outln[\s\(]*(.*?)\s*,\s*(".*?"|.*?)\s*(\)|,)', re.M | re.S), - r'vty_out(\1, \2 "\\n"\3'), - ] + replaceEntry( + re.compile(r'vty_outln[\s\(]*(.*?)\s*,\s*(".*?"|.*?)\s*(\)|,)', re.M | re.S), + r'vty_out(\1, \2 "\\n"\3', + ), +] + def fixup_file(fn): - with open(fn, 'r') as fd: + with open(fn, "r") as fd: text = fd.read() for re in rList: - text = re.compiled.sub(re.repl,text) + text = re.compiled.sub(re.repl, text) - tmpname = fn + '.fixup' - with open(tmpname, 'w') as ofd: + tmpname = fn + ".fixup" + with open(tmpname, "w") as ofd: ofd.write(text) os.rename(tmpname, fn) -if __name__ == '__main__': + +if __name__ == "__main__": for fn in sys.argv[1:]: fixup_file(fn) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 88873da904..951383beb2 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -39,6 +39,7 @@ import string import subprocess import sys from collections import OrderedDict + try: from ipaddress import IPv6Address, ip_network except ImportError: @@ -51,45 +52,49 @@ except AttributeError: # Python 3 def iteritems(d): return iter(d.items()) + + else: # Python 2 def iteritems(d): return d.iteritems() + log = logging.getLogger(__name__) class VtyshException(Exception): pass + class Vtysh(object): def __init__(self, bindir=None, confdir=None, sockdir=None, pathspace=None): self.bindir = bindir self.confdir = confdir self.pathspace = pathspace - self.common_args = [os.path.join(bindir or '', 'vtysh')] + self.common_args = [os.path.join(bindir or "", "vtysh")] if confdir: - self.common_args.extend(['--config_dir', confdir]) + self.common_args.extend(["--config_dir", confdir]) if sockdir: - self.common_args.extend(['--vty_socket', sockdir]) + self.common_args.extend(["--vty_socket", sockdir]) if pathspace: - self.common_args.extend(['-N', pathspace]) + self.common_args.extend(["-N", pathspace]) def _call(self, args, stdin=None, stdout=None, stderr=None): kwargs = {} if stdin is not None: - kwargs['stdin'] = stdin + kwargs["stdin"] = stdin if stdout is not None: - kwargs['stdout'] = stdout + kwargs["stdout"] = stdout if stderr is not None: - kwargs['stderr'] = stderr + kwargs["stderr"] = stderr return subprocess.Popen(self.common_args + args, **kwargs) def _call_cmd(self, command, stdin=None, stdout=None, stderr=None): if isinstance(command, list): - args = [item for sub in command for item in ['-c', sub]] + args = [item for sub in command for item in ["-c", sub]] else: - args = ['-c', command] + args = ["-c", command] return self._call(args, stdin, stdout, stderr) def __call__(self, command): @@ -102,9 +107,10 @@ class Vtysh(object): proc = self._call_cmd(command, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() if proc.wait() != 0: - raise VtyshException('vtysh returned status %d for command "%s"' - % (proc.returncode, command)) - return stdout.decode('UTF-8') + raise VtyshException( + 'vtysh returned status %d for command "%s"' % (proc.returncode, command) + ) + return stdout.decode("UTF-8") def is_config_available(self): """ @@ -113,56 +119,69 @@ class Vtysh(object): configuration changes. 
""" - output = self('configure') + output = self("configure") - if 'VTY configuration is locked by other VTY' in output: + if "VTY configuration is locked by other VTY" in output: log.error("vtysh 'configure' returned\n%s\n" % (output)) return False return True def exec_file(self, filename): - child = self._call(['-f', filename]) + child = self._call(["-f", filename]) if child.wait() != 0: - raise VtyshException('vtysh (exec file) exited with status %d' - % (child.returncode)) + raise VtyshException( + "vtysh (exec file) exited with status %d" % (child.returncode) + ) def mark_file(self, filename, stdin=None): - child = self._call(['-m', '-f', filename], - stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) + child = self._call( + ["-m", "-f", filename], + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + ) try: stdout, stderr = child.communicate() except subprocess.TimeoutExpired: child.kill() stdout, stderr = child.communicate() - raise VtyshException('vtysh call timed out!') + raise VtyshException("vtysh call timed out!") if child.wait() != 0: - raise VtyshException('vtysh (mark file) exited with status %d:\n%s' - % (child.returncode, stderr)) + raise VtyshException( + "vtysh (mark file) exited with status %d:\n%s" + % (child.returncode, stderr) + ) - return stdout.decode('UTF-8') + return stdout.decode("UTF-8") - def mark_show_run(self, daemon = None): - cmd = 'show running-config' + def mark_show_run(self, daemon=None): + cmd = "show running-config" if daemon: - cmd += ' %s' % daemon - cmd += ' no-header' + cmd += " %s" % daemon + cmd += " no-header" show_run = self._call_cmd(cmd, stdout=subprocess.PIPE) - mark = self._call(['-m', '-f', '-'], stdin=show_run.stdout, stdout=subprocess.PIPE) + mark = self._call( + ["-m", "-f", "-"], stdin=show_run.stdout, stdout=subprocess.PIPE + ) show_run.wait() stdout, stderr = mark.communicate() mark.wait() if show_run.returncode != 0: - raise VtyshException('vtysh (show running-config) exited with status %d:' - % (show_run.returncode)) + raise VtyshException( + "vtysh (show running-config) exited with status %d:" + % (show_run.returncode) + ) if mark.returncode != 0: - raise VtyshException('vtysh (mark running-config) exited with status %d' - % (mark.returncode)) + raise VtyshException( + "vtysh (mark running-config) exited with status %d" % (mark.returncode) + ) + + return stdout.decode("UTF-8") - return stdout.decode('UTF-8') class Context(object): @@ -222,15 +241,15 @@ class Config(object): The internal representation has been marked appropriately by passing it through vtysh with the -m parameter """ - log.info('Loading Config object from file %s', filename) + log.info("Loading Config object from file %s", filename) file_output = self.vtysh.mark_file(filename) - for line in file_output.split('\n'): + for line in file_output.split("\n"): line = line.strip() # Compress duplicate whitespaces - line = ' '.join(line.split()) + line = " ".join(line.split()) if ":" in line and not "ipv6 add": qv6_line = get_normalized_ipv6_line(line) @@ -246,16 +265,18 @@ class Config(object): The internal representation has been marked appropriately by passing it through vtysh with the -m parameter """ - log.info('Loading Config object from vtysh show running') + log.info("Loading Config object from vtysh show running") config_text = self.vtysh.mark_show_run(daemon) - for line in config_text.split('\n'): + for line in config_text.split("\n"): line = line.strip() - if (line == 'Building configuration...' 
or - line == 'Current configuration:' or - not line): + if ( + line == "Building configuration..." + or line == "Current configuration:" + or not line + ): continue self.lines.append(line) @@ -267,7 +288,7 @@ class Config(object): Return the lines read in from the configuration """ - return '\n'.join(self.lines) + return "\n".join(self.lines) def get_contexts(self): """ @@ -275,7 +296,7 @@ class Config(object): """ for (_, ctx) in sorted(iteritems(self.contexts)): - print(str(ctx) + '\n') + print(str(ctx) + "\n") def save_contexts(self, key, lines): """ @@ -285,99 +306,116 @@ class Config(object): if not key: return - ''' + """ IP addresses specified in "network" statements, "ip prefix-lists" etc. can differ in the host part of the specification the user provides and what the running config displays. For example, user can specify 11.1.1.1/24, and the running config displays this as 11.1.1.0/24. Ensure we don't do a needless operation for such lines. IS-IS & OSPFv3 have no "network" support. - ''' - re_key_rt = re.match(r'(ip|ipv6)\s+route\s+([A-Fa-f:.0-9/]+)(.*)$', key[0]) + """ + re_key_rt = re.match(r"(ip|ipv6)\s+route\s+([A-Fa-f:.0-9/]+)(.*)$", key[0]) if re_key_rt: addr = re_key_rt.group(2) - if '/' in addr: + if "/" in addr: try: - if 'ipaddress' not in sys.modules: + if "ipaddress" not in sys.modules: newaddr = IPNetwork(addr) - key[0] = '%s route %s/%s%s' % (re_key_rt.group(1), - newaddr.network, - newaddr.prefixlen, - re_key_rt.group(3)) + key[0] = "%s route %s/%s%s" % ( + re_key_rt.group(1), + newaddr.network, + newaddr.prefixlen, + re_key_rt.group(3), + ) else: newaddr = ip_network(addr, strict=False) - key[0] = '%s route %s/%s%s' % (re_key_rt.group(1), - str(newaddr.network_address), - newaddr.prefixlen, - re_key_rt.group(3)) + key[0] = "%s route %s/%s%s" % ( + re_key_rt.group(1), + str(newaddr.network_address), + newaddr.prefixlen, + re_key_rt.group(3), + ) except ValueError: pass re_key_rt = re.match( - r'(ip|ipv6)\s+prefix-list(.*)(permit|deny)\s+([A-Fa-f:.0-9/]+)(.*)$', - key[0] + r"(ip|ipv6)\s+prefix-list(.*)(permit|deny)\s+([A-Fa-f:.0-9/]+)(.*)$", key[0] ) if re_key_rt: addr = re_key_rt.group(4) - if '/' in addr: + if "/" in addr: try: - if 'ipaddress' not in sys.modules: - newaddr = '%s/%s' % (IPNetwork(addr).network, - IPNetwork(addr).prefixlen) + if "ipaddress" not in sys.modules: + newaddr = "%s/%s" % ( + IPNetwork(addr).network, + IPNetwork(addr).prefixlen, + ) else: network_addr = ip_network(addr, strict=False) - newaddr = '%s/%s' % (str(network_addr.network_address), - network_addr.prefixlen) + newaddr = "%s/%s" % ( + str(network_addr.network_address), + network_addr.prefixlen, + ) except ValueError: newaddr = addr else: newaddr = addr legestr = re_key_rt.group(5) - re_lege = re.search(r'(.*)le\s+(\d+)\s+ge\s+(\d+)(.*)', legestr) + re_lege = re.search(r"(.*)le\s+(\d+)\s+ge\s+(\d+)(.*)", legestr) if re_lege: - legestr = '%sge %s le %s%s' % (re_lege.group(1), - re_lege.group(3), - re_lege.group(2), - re_lege.group(4)) - re_lege = re.search(r'(.*)ge\s+(\d+)\s+le\s+(\d+)(.*)', legestr) + legestr = "%sge %s le %s%s" % ( + re_lege.group(1), + re_lege.group(3), + re_lege.group(2), + re_lege.group(4), + ) + re_lege = re.search(r"(.*)ge\s+(\d+)\s+le\s+(\d+)(.*)", legestr) - if (re_lege and ((re_key_rt.group(1) == "ip" and - re_lege.group(3) == "32") or - (re_key_rt.group(1) == "ipv6" and - re_lege.group(3) == "128"))): - legestr = '%sge %s%s' % (re_lege.group(1), - re_lege.group(2), - re_lege.group(4)) + if re_lege and ( + (re_key_rt.group(1) == "ip" and re_lege.group(3) == 
"32") + or (re_key_rt.group(1) == "ipv6" and re_lege.group(3) == "128") + ): + legestr = "%sge %s%s" % ( + re_lege.group(1), + re_lege.group(2), + re_lege.group(4), + ) - key[0] = '%s prefix-list%s%s %s%s' % (re_key_rt.group(1), - re_key_rt.group(2), - re_key_rt.group(3), - newaddr, - legestr) + key[0] = "%s prefix-list%s%s %s%s" % ( + re_key_rt.group(1), + re_key_rt.group(2), + re_key_rt.group(3), + newaddr, + legestr, + ) - if lines and key[0].startswith('router bgp'): + if lines and key[0].startswith("router bgp"): newlines = [] for line in lines: - re_net = re.match(r'network\s+([A-Fa-f:.0-9/]+)(.*)$', line) + re_net = re.match(r"network\s+([A-Fa-f:.0-9/]+)(.*)$", line) if re_net: addr = re_net.group(1) - if '/' not in addr and key[0].startswith('router bgp'): + if "/" not in addr and key[0].startswith("router bgp"): # This is most likely an error because with no # prefixlen, BGP treats the prefixlen as 8 - addr = addr + '/8' + addr = addr + "/8" try: - if 'ipaddress' not in sys.modules: + if "ipaddress" not in sys.modules: newaddr = IPNetwork(addr) - line = 'network %s/%s %s' % (newaddr.network, - newaddr.prefixlen, - re_net.group(2)) + line = "network %s/%s %s" % ( + newaddr.network, + newaddr.prefixlen, + re_net.group(2), + ) else: network_addr = ip_network(addr, strict=False) - line = 'network %s/%s %s' % (str(network_addr.network_address), - network_addr.prefixlen, - re_net.group(2)) + line = "network %s/%s %s" % ( + str(network_addr.network_address), + network_addr.prefixlen, + re_net.group(2), + ) newlines.append(line) except ValueError: # Really this should be an error. Whats a network @@ -387,13 +425,16 @@ class Config(object): newlines.append(line) lines = newlines - ''' + """ More fixups in user specification and what running config shows. "null0" in routes must be replaced by Null0. - ''' - if (key[0].startswith('ip route') or key[0].startswith('ipv6 route') and - 'null0' in key[0]): - key[0] = re.sub(r'\s+null0(\s*$)', ' Null0', key[0]) + """ + if ( + key[0].startswith("ip route") + or key[0].startswith("ipv6 route") + and "null0" in key[0] + ): + key[0] = re.sub(r"\s+null0(\s*$)", " Null0", key[0]) if lines: if tuple(key) not in self.contexts: @@ -416,7 +457,7 @@ class Config(object): current_context_lines = [] ctx_keys = [] - ''' + """ The end of a context is flagged via the 'end' keyword: ! @@ -460,7 +501,7 @@ router ospf timers throttle spf 0 50 5000 ! end - ''' + """ # The code assumes that its working on the output from the "vtysh -m" # command. That provides the appropriate markers to signify end of @@ -480,38 +521,40 @@ end # the keywords that we know are single line contexts. 
bgp in this case # is not the main router bgp block, but enabling multi-instance - oneline_ctx_keywords = ("access-list ", - "agentx", - "allow-external-route-update", - "bgp ", - "debug ", - "domainname ", - "dump ", - "enable ", - "frr ", - "hostname ", - "ip ", - "ipv6 ", - "log ", - "mpls lsp", - "mpls label", - "no ", - "password ", - "ptm-enable", - "router-id ", - "service ", - "table ", - "username ", - "zebra ", - "vrrp autoconfigure", - "evpn mh") + oneline_ctx_keywords = ( + "access-list ", + "agentx", + "allow-external-route-update", + "bgp ", + "debug ", + "domainname ", + "dump ", + "enable ", + "frr ", + "hostname ", + "ip ", + "ipv6 ", + "log ", + "mpls lsp", + "mpls label", + "no ", + "password ", + "ptm-enable", + "router-id ", + "service ", + "table ", + "username ", + "zebra ", + "vrrp autoconfigure", + "evpn mh", + ) for line in self.lines: if not line: continue - if line.startswith('!') or line.startswith('#'): + if line.startswith("!") or line.startswith("#"): continue # one line contexts @@ -519,22 +562,31 @@ end # as part of its 'mpls ldp' config context. If we are processing # ldp configuration and encounter a router-id we should NOT switch # to a new context - if new_ctx is True and any(line.startswith(keyword) for keyword in oneline_ctx_keywords) and not ( - ctx_keys and ctx_keys[0].startswith("mpls ldp") and line.startswith("router-id ")): + if ( + new_ctx is True + and any(line.startswith(keyword) for keyword in oneline_ctx_keywords) + and not ( + ctx_keys + and ctx_keys[0].startswith("mpls ldp") + and line.startswith("router-id ") + ) + ): self.save_contexts(ctx_keys, current_context_lines) # Start a new context main_ctx_key = [] - ctx_keys = [line, ] + ctx_keys = [ + line, + ] current_context_lines = [] - log.debug('LINE %-50s: entering new context, %-50s', line, ctx_keys) + log.debug("LINE %-50s: entering new context, %-50s", line, ctx_keys) self.save_contexts(ctx_keys, current_context_lines) new_ctx = True elif line == "end": self.save_contexts(ctx_keys, current_context_lines) - log.debug('LINE %-50s: exiting old context, %-50s', line, ctx_keys) + log.debug("LINE %-50s: exiting old context, %-50s", line, ctx_keys) # Start a new context new_ctx = True @@ -545,9 +597,11 @@ end elif line == "exit-vrf": self.save_contexts(ctx_keys, current_context_lines) current_context_lines.append(line) - log.debug('LINE %-50s: append to current_context_lines, %-50s', line, ctx_keys) + log.debug( + "LINE %-50s: append to current_context_lines, %-50s", line, ctx_keys + ) - #Start a new context + # Start a new context new_ctx = True main_ctx_key = [] ctx_keys = [] @@ -561,7 +615,11 @@ end # Start a new context ctx_keys = copy.deepcopy(main_ctx_key) current_context_lines = [] - log.debug('LINE %-50s: popping from subcontext to ctx%-50s', line, ctx_keys) + log.debug( + "LINE %-50s: popping from subcontext to ctx%-50s", + line, + ctx_keys, + ) elif line in ["exit-vni", "exit-ldp-if"]: if sub_main_ctx_key: @@ -570,70 +628,92 @@ end # Start a new context ctx_keys = copy.deepcopy(sub_main_ctx_key) current_context_lines = [] - log.debug('LINE %-50s: popping from sub-subcontext to ctx%-50s', line, ctx_keys) + log.debug( + "LINE %-50s: popping from sub-subcontext to ctx%-50s", + line, + ctx_keys, + ) elif new_ctx is True: if not main_ctx_key: - ctx_keys = [line, ] + ctx_keys = [ + line, + ] else: ctx_keys = copy.deepcopy(main_ctx_key) main_ctx_key = [] current_context_lines = [] new_ctx = False - log.debug('LINE %-50s: entering new context, %-50s', line, ctx_keys) - elif 
(line.startswith("address-family ") or - line.startswith("vnc defaults") or - line.startswith("vnc l2-group") or - line.startswith("vnc nve-group") or - line.startswith("peer") or - line.startswith("key ") or - line.startswith("member pseudowire")): + log.debug("LINE %-50s: entering new context, %-50s", line, ctx_keys) + elif ( + line.startswith("address-family ") + or line.startswith("vnc defaults") + or line.startswith("vnc l2-group") + or line.startswith("vnc nve-group") + or line.startswith("peer") + or line.startswith("key ") + or line.startswith("member pseudowire") + ): main_ctx_key = [] # Save old context first self.save_contexts(ctx_keys, current_context_lines) current_context_lines = [] main_ctx_key = copy.deepcopy(ctx_keys) - log.debug('LINE %-50s: entering sub-context, append to ctx_keys', line) + log.debug("LINE %-50s: entering sub-context, append to ctx_keys", line) - if line == "address-family ipv6" and not ctx_keys[0].startswith("mpls ldp"): + if line == "address-family ipv6" and not ctx_keys[0].startswith( + "mpls ldp" + ): ctx_keys.append("address-family ipv6 unicast") - elif line == "address-family ipv4" and not ctx_keys[0].startswith("mpls ldp"): + elif line == "address-family ipv4" and not ctx_keys[0].startswith( + "mpls ldp" + ): ctx_keys.append("address-family ipv4 unicast") elif line == "address-family evpn": ctx_keys.append("address-family l2vpn evpn") else: ctx_keys.append(line) - elif ((line.startswith("vni ") and - len(ctx_keys) == 2 and - ctx_keys[0].startswith('router bgp') and - ctx_keys[1] == 'address-family l2vpn evpn')): + elif ( + line.startswith("vni ") + and len(ctx_keys) == 2 + and ctx_keys[0].startswith("router bgp") + and ctx_keys[1] == "address-family l2vpn evpn" + ): # Save old context first self.save_contexts(ctx_keys, current_context_lines) current_context_lines = [] sub_main_ctx_key = copy.deepcopy(ctx_keys) - log.debug('LINE %-50s: entering sub-sub-context, append to ctx_keys', line) + log.debug( + "LINE %-50s: entering sub-sub-context, append to ctx_keys", line + ) ctx_keys.append(line) - - elif ((line.startswith("interface ") and - len(ctx_keys) == 2 and - ctx_keys[0].startswith('mpls ldp') and - ctx_keys[1].startswith('address-family'))): + + elif ( + line.startswith("interface ") + and len(ctx_keys) == 2 + and ctx_keys[0].startswith("mpls ldp") + and ctx_keys[1].startswith("address-family") + ): # Save old context first self.save_contexts(ctx_keys, current_context_lines) current_context_lines = [] sub_main_ctx_key = copy.deepcopy(ctx_keys) - log.debug('LINE %-50s: entering sub-sub-context, append to ctx_keys', line) + log.debug( + "LINE %-50s: entering sub-sub-context, append to ctx_keys", line + ) ctx_keys.append(line) else: # Continuing in an existing context, add non-commented lines to it current_context_lines.append(line) - log.debug('LINE %-50s: append to current_context_lines, %-50s', line, ctx_keys) + log.debug( + "LINE %-50s: append to current_context_lines, %-50s", line, ctx_keys + ) # Save the context of the last one self.save_contexts(ctx_keys, current_context_lines) @@ -647,20 +727,20 @@ def lines_to_config(ctx_keys, line, delete): if line: for (i, ctx_key) in enumerate(ctx_keys): - cmd.append(' ' * i + ctx_key) + cmd.append(" " * i + ctx_key) line = line.lstrip() - indent = len(ctx_keys) * ' ' + indent = len(ctx_keys) * " " # There are some commands that are on by default so their "no" form will be # displayed in the config. "no bgp default ipv4-unicast" is one of these. 
# If we need to remove this line we do so by adding "bgp default ipv4-unicast", # not by doing a "no no bgp default ipv4-unicast" if delete: - if line.startswith('no '): - cmd.append('%s%s' % (indent, line[3:])) + if line.startswith("no "): + cmd.append("%s%s" % (indent, line[3:])) else: - cmd.append('%sno %s' % (indent, line)) + cmd.append("%sno %s" % (indent, line)) else: cmd.append(indent + line) @@ -669,16 +749,16 @@ def lines_to_config(ctx_keys, line, delete): # context ('no router ospf' for example) else: for i, ctx_key in enumerate(ctx_keys[:-1]): - cmd.append('%s%s' % (' ' * i, ctx_key)) + cmd.append("%s%s" % (" " * i, ctx_key)) # Only put the 'no' on the last sub-context if delete: - if ctx_keys[-1].startswith('no '): - cmd.append('%s%s' % (' ' * (len(ctx_keys) - 1), ctx_keys[-1][3:])) + if ctx_keys[-1].startswith("no "): + cmd.append("%s%s" % (" " * (len(ctx_keys) - 1), ctx_keys[-1][3:])) else: - cmd.append('%sno %s' % (' ' * (len(ctx_keys) - 1), ctx_keys[-1])) + cmd.append("%sno %s" % (" " * (len(ctx_keys) - 1), ctx_keys[-1])) else: - cmd.append('%s%s' % (' ' * (len(ctx_keys) - 1), ctx_keys[-1])) + cmd.append("%s%s" % (" " * (len(ctx_keys) - 1), ctx_keys[-1])) return cmd @@ -691,23 +771,26 @@ def get_normalized_ipv6_line(line): the IPv6 word is a network """ norm_line = "" - words = line.split(' ') + words = line.split(" ") for word in words: if ":" in word: norm_word = None if "/" in word: try: - if 'ipaddress' not in sys.modules: + if "ipaddress" not in sys.modules: v6word = IPNetwork(word) - norm_word = '%s/%s' % (v6word.network, v6word.prefixlen) + norm_word = "%s/%s" % (v6word.network, v6word.prefixlen) else: v6word = ip_network(word, strict=False) - norm_word = '%s/%s' % (str(v6word.network_address), v6word.prefixlen) + norm_word = "%s/%s" % ( + str(v6word.network_address), + v6word.prefixlen, + ) except ValueError: pass if not norm_word: try: - norm_word = '%s' % IPv6Address(word) + norm_word = "%s" % IPv6Address(word) except ValueError: norm_word = word else: @@ -728,6 +811,7 @@ def line_exist(lines, target_ctx_keys, target_line, exact_match=True): return True return False + def check_for_exit_vrf(lines_to_add, lines_to_del): # exit-vrf is a bit tricky. 
If the new config is missing it but we @@ -740,25 +824,26 @@ def check_for_exit_vrf(lines_to_add, lines_to_del): for (ctx_keys, line) in lines_to_add: if add_exit_vrf == True: if ctx_keys[0] != prior_ctx_key: - insert_key=(prior_ctx_key), + insert_key = ((prior_ctx_key),) lines_to_add.insert(index, ((insert_key, "exit-vrf"))) add_exit_vrf = False - if ctx_keys[0].startswith('vrf') and line: + if ctx_keys[0].startswith("vrf") and line: if line is not "exit-vrf": add_exit_vrf = True - prior_ctx_key = (ctx_keys[0]) + prior_ctx_key = ctx_keys[0] else: add_exit_vrf = False - index+=1 + index += 1 for (ctx_keys, line) in lines_to_del: if line == "exit-vrf": - if (line_exist(lines_to_add, ctx_keys, line)): + if line_exist(lines_to_add, ctx_keys, line): lines_to_del.remove((ctx_keys, line)) return (lines_to_add, lines_to_del) + def ignore_delete_re_add_lines(lines_to_add, lines_to_del): # Quite possibly the most confusing (while accurate) variable names in history @@ -768,10 +853,10 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): for (ctx_keys, line) in lines_to_del: deleted = False - if ctx_keys[0].startswith('router bgp') and line: + if ctx_keys[0].startswith("router bgp") and line: - if line.startswith('neighbor '): - ''' + if line.startswith("neighbor "): + """ BGP changed how it displays swpX peers that are part of peer-group. Older versions of frr would display these on separate lines: neighbor swp1 interface @@ -788,10 +873,14 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): neighbor swp1 peer-group FOO If so then chop the del line and the corresponding add lines - ''' + """ - re_swpx_int_peergroup = re.search('neighbor (\S+) interface peer-group (\S+)', line) - re_swpx_int_v6only_peergroup = re.search('neighbor (\S+) interface v6only peer-group (\S+)', line) + re_swpx_int_peergroup = re.search( + "neighbor (\S+) interface peer-group (\S+)", line + ) + re_swpx_int_v6only_peergroup = re.search( + "neighbor (\S+) interface v6only peer-group (\S+)", line + ) if re_swpx_int_peergroup or re_swpx_int_v6only_peergroup: swpx_interface = None @@ -807,21 +896,29 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): swpx_interface = "neighbor %s interface v6only" % swpx swpx_peergroup = "neighbor %s peer-group %s" % (swpx, peergroup) - found_add_swpx_interface = line_exist(lines_to_add, ctx_keys, swpx_interface) - found_add_swpx_peergroup = line_exist(lines_to_add, ctx_keys, swpx_peergroup) + found_add_swpx_interface = line_exist( + lines_to_add, ctx_keys, swpx_interface + ) + found_add_swpx_peergroup = line_exist( + lines_to_add, ctx_keys, swpx_peergroup + ) tmp_ctx_keys = tuple(list(ctx_keys)) if not found_add_swpx_peergroup: tmp_ctx_keys = list(ctx_keys) - tmp_ctx_keys.append('address-family ipv4 unicast') + tmp_ctx_keys.append("address-family ipv4 unicast") tmp_ctx_keys = tuple(tmp_ctx_keys) - found_add_swpx_peergroup = line_exist(lines_to_add, tmp_ctx_keys, swpx_peergroup) + found_add_swpx_peergroup = line_exist( + lines_to_add, tmp_ctx_keys, swpx_peergroup + ) if not found_add_swpx_peergroup: tmp_ctx_keys = list(ctx_keys) - tmp_ctx_keys.append('address-family ipv6 unicast') + tmp_ctx_keys.append("address-family ipv6 unicast") tmp_ctx_keys = tuple(tmp_ctx_keys) - found_add_swpx_peergroup = line_exist(lines_to_add, tmp_ctx_keys, swpx_peergroup) + found_add_swpx_peergroup = line_exist( + lines_to_add, tmp_ctx_keys, swpx_peergroup + ) if found_add_swpx_interface and found_add_swpx_peergroup: deleted = True @@ -829,30 +926,36 @@ def 
ignore_delete_re_add_lines(lines_to_add, lines_to_del): lines_to_add_to_del.append((ctx_keys, swpx_interface)) lines_to_add_to_del.append((tmp_ctx_keys, swpx_peergroup)) - ''' + """ Changing the bfd timers on neighbors is allowed without doing a delete/add process. Since doing a "no neighbor blah bfd ..." will cause the peer to bounce unnecessarily, just skip the delete and just do the add. - ''' - re_nbr_bfd_timers = re.search(r'neighbor (\S+) bfd (\S+) (\S+) (\S+)', line) + """ + re_nbr_bfd_timers = re.search( + r"neighbor (\S+) bfd (\S+) (\S+) (\S+)", line + ) if re_nbr_bfd_timers: nbr = re_nbr_bfd_timers.group(1) bfd_nbr = "neighbor %s" % nbr - bfd_search_string = bfd_nbr + r' bfd (\S+) (\S+) (\S+)' + bfd_search_string = bfd_nbr + r" bfd (\S+) (\S+) (\S+)" for (ctx_keys, add_line) in lines_to_add: - if ctx_keys[0].startswith('router bgp'): - re_add_nbr_bfd_timers = re.search(bfd_search_string, add_line) + if ctx_keys[0].startswith("router bgp"): + re_add_nbr_bfd_timers = re.search( + bfd_search_string, add_line + ) if re_add_nbr_bfd_timers: - found_add_bfd_nbr = line_exist(lines_to_add, ctx_keys, bfd_nbr, False) + found_add_bfd_nbr = line_exist( + lines_to_add, ctx_keys, bfd_nbr, False + ) if found_add_bfd_nbr: lines_to_del_to_del.append((ctx_keys, line)) - ''' + """ We changed how we display the neighbor interface command. Older versions of frr would display the following: neighbor swp1 interface @@ -874,9 +977,13 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): neighbor swp1 capability extended-nexthop If so then chop the del line and the corresponding add lines - ''' - re_swpx_int_remoteas = re.search('neighbor (\S+) interface remote-as (\S+)', line) - re_swpx_int_v6only_remoteas = re.search('neighbor (\S+) interface v6only remote-as (\S+)', line) + """ + re_swpx_int_remoteas = re.search( + "neighbor (\S+) interface remote-as (\S+)", line + ) + re_swpx_int_v6only_remoteas = re.search( + "neighbor (\S+) interface v6only remote-as (\S+)", line + ) if re_swpx_int_remoteas or re_swpx_int_v6only_remoteas: swpx_interface = None @@ -892,8 +999,12 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): swpx_interface = "neighbor %s interface v6only" % swpx swpx_remoteas = "neighbor %s remote-as %s" % (swpx, remoteas) - found_add_swpx_interface = line_exist(lines_to_add, ctx_keys, swpx_interface) - found_add_swpx_remoteas = line_exist(lines_to_add, ctx_keys, swpx_remoteas) + found_add_swpx_interface = line_exist( + lines_to_add, ctx_keys, swpx_interface + ) + found_add_swpx_remoteas = line_exist( + lines_to_add, ctx_keys, swpx_remoteas + ) tmp_ctx_keys = tuple(list(ctx_keys)) if found_add_swpx_interface and found_add_swpx_remoteas: @@ -902,7 +1013,7 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): lines_to_add_to_del.append((ctx_keys, swpx_interface)) lines_to_add_to_del.append((tmp_ctx_keys, swpx_remoteas)) - ''' + """ We made the 'bgp bestpath as-path multipath-relax' command automatically assume 'no-as-set' since the lack of this option caused weird routing problems. When the running config is shown in @@ -910,10 +1021,12 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): is the default. This causes frr-reload to unnecessarily unapply this option only to apply it back again, causing unnecessary session resets. 
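
Every one of these special cases boils down to the same reconciliation move. A minimal sketch of it follows, with illustrative names (the real function above pairs up equivalent spellings with regexes rather than an exact-match callback):

def drop_equivalent_pairs(lines_to_add, lines_to_del, equivalent):
    # If a command is deleted in one spelling only to be re-added in an
    # equivalent spelling, drop it from both lists so the daemon never
    # sees the disruptive delete/add cycle.
    add_drop, del_drop = [], []
    for ctx_keys, line in lines_to_del:
        for add_ctx, add_line in lines_to_add:
            if (
                ctx_keys == add_ctx
                and line
                and add_line
                and equivalent(line, add_line)
            ):
                del_drop.append((ctx_keys, line))
                add_drop.append((add_ctx, add_line))
    lines_to_add = [x for x in lines_to_add if x not in add_drop]
    lines_to_del = [x for x in lines_to_del if x not in del_drop]
    return lines_to_add, lines_to_del
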
- ''' - if 'multipath-relax' in line: - re_asrelax_new = re.search('^bgp\s+bestpath\s+as-path\s+multipath-relax$', line) - old_asrelax_cmd = 'bgp bestpath as-path multipath-relax no-as-set' + """ + if "multipath-relax" in line: + re_asrelax_new = re.search( + "^bgp\s+bestpath\s+as-path\s+multipath-relax$", line + ) + old_asrelax_cmd = "bgp bestpath as-path multipath-relax no-as-set" found_asrelax_old = line_exist(lines_to_add, ctx_keys, old_asrelax_cmd) if re_asrelax_new and found_asrelax_old: @@ -921,34 +1034,36 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): lines_to_del_to_del.append((ctx_keys, line)) lines_to_add_to_del.append((ctx_keys, old_asrelax_cmd)) - ''' + """ If we are modifying the BGP table-map we need to avoid a del/add and instead modify the table-map in place via an add. This is needed to avoid installing all routes in the RIB the second the 'no table-map' is issued. - ''' - if line.startswith('table-map'): - found_table_map = line_exist(lines_to_add, ctx_keys, 'table-map', False) + """ + if line.startswith("table-map"): + found_table_map = line_exist(lines_to_add, ctx_keys, "table-map", False) if found_table_map: lines_to_del_to_del.append((ctx_keys, line)) - ''' + """ More old-to-new config handling. ip import-table no longer accepts distance, but we honor the old syntax. But 'show running' shows only the new syntax. This causes an unnecessary 'no import-table' followed by the same old 'ip import-table' which causes perturbations in announced routes leading to traffic blackholes. Fix this issue. - ''' - re_importtbl = re.search('^ip\s+import-table\s+(\d+)$', ctx_keys[0]) + """ + re_importtbl = re.search("^ip\s+import-table\s+(\d+)$", ctx_keys[0]) if re_importtbl: table_num = re_importtbl.group(1) for ctx in lines_to_add: - if ctx[0][0].startswith('ip import-table %s distance' % table_num): - lines_to_del_to_del.append((('ip import-table %s' % table_num,), None)) + if ctx[0][0].startswith("ip import-table %s distance" % table_num): + lines_to_del_to_del.append( + (("ip import-table %s" % table_num,), None) + ) lines_to_add_to_del.append((ctx[0], None)) - ''' + """ ip/ipv6 prefix-list can be specified without a seq number. However, the running config always adds 'seq x', where x is a number incremented by 5 for every element, to the prefix list. 
So, ignore such lines as @@ -956,24 +1071,36 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): ip prefix-list PR-TABLE-2 seq 5 permit 20.8.2.0/24 le 32 ip prefix-list PR-TABLE-2 seq 10 permit 20.8.2.0/24 le 32 ipv6 prefix-list vrfdev6-12 permit 2000:9:2::/64 gt 64 - ''' - re_ip_pfxlst = re.search('^(ip|ipv6)(\s+prefix-list\s+)(\S+\s+)(seq \d+\s+)(permit|deny)(.*)$', - ctx_keys[0]) + """ + re_ip_pfxlst = re.search( + "^(ip|ipv6)(\s+prefix-list\s+)(\S+\s+)(seq \d+\s+)(permit|deny)(.*)$", + ctx_keys[0], + ) if re_ip_pfxlst: - tmpline = (re_ip_pfxlst.group(1) + re_ip_pfxlst.group(2) + - re_ip_pfxlst.group(3) + re_ip_pfxlst.group(5) + - re_ip_pfxlst.group(6)) + tmpline = ( + re_ip_pfxlst.group(1) + + re_ip_pfxlst.group(2) + + re_ip_pfxlst.group(3) + + re_ip_pfxlst.group(5) + + re_ip_pfxlst.group(6) + ) for ctx in lines_to_add: if ctx[0][0] == tmpline: lines_to_del_to_del.append((ctx_keys, None)) lines_to_add_to_del.append(((tmpline,), None)) - if (len(ctx_keys) == 3 and - ctx_keys[0].startswith('router bgp') and - ctx_keys[1] == 'address-family l2vpn evpn' and - ctx_keys[2].startswith('vni')): + if ( + len(ctx_keys) == 3 + and ctx_keys[0].startswith("router bgp") + and ctx_keys[1] == "address-family l2vpn evpn" + and ctx_keys[2].startswith("vni") + ): - re_route_target = re.search('^route-target import (.*)$', line) if line is not None else False + re_route_target = ( + re.search("^route-target import (.*)$", line) + if line is not None + else False + ) if re_route_target: rt = re_route_target.group(1).strip() @@ -981,10 +1108,14 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): route_target_export_line = "route-target export %s" % rt route_target_both_line = "route-target both %s" % rt - found_route_target_export_line = line_exist(lines_to_del, ctx_keys, route_target_export_line) - found_route_target_both_line = line_exist(lines_to_add, ctx_keys, route_target_both_line) + found_route_target_export_line = line_exist( + lines_to_del, ctx_keys, route_target_export_line + ) + found_route_target_both_line = line_exist( + lines_to_add, ctx_keys, route_target_both_line + ) - ''' + """ If the running configs has route-target import 1:1 route-target export 1:1 @@ -993,7 +1124,7 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): route-target both 1:1 then we can ignore deleting the import/export and ignore adding the 'both' - ''' + """ if found_route_target_export_line and found_route_target_both_line: lines_to_del_to_del.append((ctx_keys, route_target_import_line)) lines_to_del_to_del.append((ctx_keys, route_target_export_line)) @@ -1002,10 +1133,9 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): # Deleting static routes under a vrf can lead to time-outs if each is sent # as separate vtysh -c commands. 
Change them from being in lines_to_del and # put the "no" form in lines_to_add - if ctx_keys[0].startswith('vrf ') and line: - if (line.startswith('ip route') or - line.startswith('ipv6 route')): - add_cmd = ('no ' + line) + if ctx_keys[0].startswith("vrf ") and line: + if line.startswith("ip route") or line.startswith("ipv6 route"): + add_cmd = "no " + line lines_to_add.append((ctx_keys, add_cmd)) lines_to_del_to_del.append((ctx_keys, line)) @@ -1016,7 +1146,7 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): lines_to_del_to_del.append((ctx_keys, line)) lines_to_add_to_del.append((ctx_keys, line)) else: - ''' + """ We have commands that used to be displayed in the global part of 'router bgp' that are now displayed under 'address-family ipv4 unicast' @@ -1032,8 +1162,12 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): neighbor ISL advertisement-interval 0 Look to see if we are deleting it in one format just to add it back in the other - ''' - if ctx_keys[0].startswith('router bgp') and len(ctx_keys) > 1 and ctx_keys[1] == 'address-family ipv4 unicast': + """ + if ( + ctx_keys[0].startswith("router bgp") + and len(ctx_keys) > 1 + and ctx_keys[1] == "address-family ipv4 unicast" + ): tmp_ctx_keys = list(ctx_keys)[:-1] tmp_ctx_keys = tuple(tmp_ctx_keys) @@ -1061,16 +1195,18 @@ def ignore_unconfigurable_lines(lines_to_add, lines_to_del): for (ctx_keys, line) in lines_to_del: - if (ctx_keys[0].startswith('frr version') or - ctx_keys[0].startswith('frr defaults') or - ctx_keys[0].startswith('username') or - ctx_keys[0].startswith('password') or - ctx_keys[0].startswith('line vty') or - + if ( + ctx_keys[0].startswith("frr version") + or ctx_keys[0].startswith("frr defaults") + or ctx_keys[0].startswith("username") + or ctx_keys[0].startswith("password") + or ctx_keys[0].startswith("line vty") + or # This is technically "no"able but if we did so frr-reload would # stop working so do not let the user shoot themselves in the foot # by removing this. 
- ctx_keys[0].startswith('service integrated-vtysh-config')): + ctx_keys[0].startswith("service integrated-vtysh-config") + ): log.info('"%s" cannot be removed' % (ctx_keys[-1],)) lines_to_del_to_del.append((ctx_keys, line)) @@ -1106,25 +1242,35 @@ def compare_context_objects(newconf, running): lines_to_del.append((running_ctx_keys, None)) # We cannot do 'no interface' or 'no vrf' in FRR, and so deal with it - elif running_ctx_keys[0].startswith('interface') or running_ctx_keys[0].startswith('vrf'): + elif running_ctx_keys[0].startswith("interface") or running_ctx_keys[ + 0 + ].startswith("vrf"): for line in running_ctx.lines: lines_to_del.append((running_ctx_keys, line)) # If this is an address-family under 'router bgp' and we are already deleting the # entire 'router bgp' context then ignore this sub-context - elif "router bgp" in running_ctx_keys[0] and len(running_ctx_keys) > 1 and delete_bgpd: + elif ( + "router bgp" in running_ctx_keys[0] + and len(running_ctx_keys) > 1 + and delete_bgpd + ): continue # Delete an entire vni sub-context under "address-family l2vpn evpn" - elif ("router bgp" in running_ctx_keys[0] and - len(running_ctx_keys) > 2 and - running_ctx_keys[1].startswith('address-family l2vpn evpn') and - running_ctx_keys[2].startswith('vni ')): + elif ( + "router bgp" in running_ctx_keys[0] + and len(running_ctx_keys) > 2 + and running_ctx_keys[1].startswith("address-family l2vpn evpn") + and running_ctx_keys[2].startswith("vni ") + ): lines_to_del.append((running_ctx_keys, None)) - elif ("router bgp" in running_ctx_keys[0] and - len(running_ctx_keys) > 1 and - running_ctx_keys[1].startswith('address-family')): + elif ( + "router bgp" in running_ctx_keys[0] + and len(running_ctx_keys) > 1 + and running_ctx_keys[1].startswith("address-family") + ): # There's no 'no address-family' support and so we have to # delete each line individually again for line in running_ctx.lines: @@ -1134,24 +1280,31 @@ def compare_context_objects(newconf, running): # doing vtysh -c inefficient (and can time out.) For # these commands, instead of adding them to lines_to_del, # add the "no " version to lines_to_add. 
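
A side note on the long startswith()/or chains used here and above: str.startswith() also accepts a tuple of prefixes, so an equivalent spelling would be the sketch below (illustrative only; black never rewrites logic, it only reformats):

UNREMOVABLE_PREFIXES = (
    "frr version",
    "frr defaults",
    "username",
    "password",
    "line vty",
    "service integrated-vtysh-config",
)


def is_unremovable(first_ctx_key):
    # Same result as the chained comparisons, one prefix test.
    return first_ctx_key.startswith(UNREMOVABLE_PREFIXES)
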
- elif (running_ctx_keys[0].startswith('ip route') or - running_ctx_keys[0].startswith('ipv6 route') or - running_ctx_keys[0].startswith('access-list') or - running_ctx_keys[0].startswith('ipv6 access-list') or - running_ctx_keys[0].startswith('ip prefix-list') or - running_ctx_keys[0].startswith('ipv6 prefix-list')): - add_cmd = ('no ' + running_ctx_keys[0],) + elif ( + running_ctx_keys[0].startswith("ip route") + or running_ctx_keys[0].startswith("ipv6 route") + or running_ctx_keys[0].startswith("access-list") + or running_ctx_keys[0].startswith("ipv6 access-list") + or running_ctx_keys[0].startswith("ip prefix-list") + or running_ctx_keys[0].startswith("ipv6 prefix-list") + ): + add_cmd = ("no " + running_ctx_keys[0],) lines_to_add.append((add_cmd, None)) # if this an interface sub-subcontext in an address-family block in ldpd and # we are already deleting the whole context, then ignore this - elif (len(running_ctx_keys) > 2 and running_ctx_keys[0].startswith('mpls ldp') and - running_ctx_keys[1].startswith('address-family') and - (running_ctx_keys[:2], None) in lines_to_del): + elif ( + len(running_ctx_keys) > 2 + and running_ctx_keys[0].startswith("mpls ldp") + and running_ctx_keys[1].startswith("address-family") + and (running_ctx_keys[:2], None) in lines_to_del + ): continue # Non-global context - elif running_ctx_keys and not any("address-family" in key for key in running_ctx_keys): + elif running_ctx_keys and not any( + "address-family" in key for key in running_ctx_keys + ): lines_to_del.append((running_ctx_keys, None)) elif running_ctx_keys and not any("vni" in key for key in running_ctx_keys): @@ -1186,33 +1339,78 @@ def compare_context_objects(newconf, running): lines_to_add.append((newconf_ctx_keys, line)) (lines_to_add, lines_to_del) = check_for_exit_vrf(lines_to_add, lines_to_del) - (lines_to_add, lines_to_del) = ignore_delete_re_add_lines(lines_to_add, lines_to_del) - (lines_to_add, lines_to_del) = ignore_unconfigurable_lines(lines_to_add, lines_to_del) + (lines_to_add, lines_to_del) = ignore_delete_re_add_lines( + lines_to_add, lines_to_del + ) + (lines_to_add, lines_to_del) = ignore_unconfigurable_lines( + lines_to_add, lines_to_del + ) return (lines_to_add, lines_to_del) -if __name__ == '__main__': +if __name__ == "__main__": # Command line options - parser = argparse.ArgumentParser(description='Dynamically apply diff in frr configs') - parser.add_argument('--input', help='Read running config from file instead of "show running"') + parser = argparse.ArgumentParser( + description="Dynamically apply diff in frr configs" + ) + parser.add_argument( + "--input", help='Read running config from file instead of "show running"' + ) group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--reload', action='store_true', help='Apply the deltas', default=False) - group.add_argument('--test', action='store_true', help='Show the deltas', default=False) + group.add_argument( + "--reload", action="store_true", help="Apply the deltas", default=False + ) + group.add_argument( + "--test", action="store_true", help="Show the deltas", default=False + ) level_group = parser.add_mutually_exclusive_group() - level_group.add_argument('--debug', action='store_true', - help='Enable debugs (synonym for --log-level=debug)', default=False) - level_group.add_argument('--log-level', help='Log level', default="info", - choices=("critical", "error", "warning", "info", "debug")) - parser.add_argument('--stdout', action='store_true', help='Log to STDOUT', default=False) - 
parser.add_argument('--pathspace', '-N', metavar='NAME', help='Reload specified path/namespace', default=None)
-    parser.add_argument('filename', help='Location of new frr config file')
-    parser.add_argument('--overwrite', action='store_true', help='Overwrite frr.conf with running config output', default=False)
-    parser.add_argument('--bindir', help='path to the vtysh executable', default='/usr/bin')
-    parser.add_argument('--confdir', help='path to the daemon config files', default='/etc/frr')
-    parser.add_argument('--rundir', help='path for the temp config file', default='/var/run/frr')
-    parser.add_argument('--vty_socket', help='socket to be used by vtysh to connect to the daemons', default=None)
-    parser.add_argument('--daemon', help='daemon for which want to replace the config', default='')
+    level_group.add_argument(
+        "--debug",
+        action="store_true",
+        help="Enable debugs (synonym for --log-level=debug)",
+        default=False,
+    )
+    level_group.add_argument(
+        "--log-level",
+        help="Log level",
+        default="info",
+        choices=("critical", "error", "warning", "info", "debug"),
+    )
+    parser.add_argument(
+        "--stdout", action="store_true", help="Log to STDOUT", default=False
+    )
+    parser.add_argument(
+        "--pathspace",
+        "-N",
+        metavar="NAME",
+        help="Reload specified path/namespace",
+        default=None,
+    )
+    parser.add_argument("filename", help="Location of new frr config file")
+    parser.add_argument(
+        "--overwrite",
+        action="store_true",
+        help="Overwrite frr.conf with running config output",
+        default=False,
+    )
+    parser.add_argument(
+        "--bindir", help="path to the vtysh executable", default="/usr/bin"
+    )
+    parser.add_argument(
+        "--confdir", help="path to the daemon config files", default="/etc/frr"
+    )
+    parser.add_argument(
+        "--rundir", help="path for the temp config file", default="/var/run/frr"
+    )
+    parser.add_argument(
+        "--vty_socket",
+        help="socket to be used by vtysh to connect to the daemons",
+        default=None,
+    )
+    parser.add_argument(
+        "--daemon", help="daemon for which we want to replace the config", default=""
+    )

     args = parser.parse_args()

@@ -1220,22 +1418,28 @@ if __name__ == '__main__':
     # For --test log to stdout
     # For --reload log to /var/log/frr/frr-reload.log
     if args.test or args.stdout:
-        logging.basicConfig(format='%(asctime)s %(levelname)5s: %(message)s')
+        logging.basicConfig(format="%(asctime)s %(levelname)5s: %(message)s")

         # Color the errors and warnings in red
-        logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR))
-        logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING))
+        logging.addLevelName(
+            logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR)
+        )
+        logging.addLevelName(
+            logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)
+        )

     elif args.reload:
-        if not os.path.isdir('/var/log/frr/'):
-            os.makedirs('/var/log/frr/')
+        if not os.path.isdir("/var/log/frr/"):
+            os.makedirs("/var/log/frr/")

-        logging.basicConfig(filename='/var/log/frr/frr-reload.log',
-                            format='%(asctime)s %(levelname)5s: %(message)s')
+        logging.basicConfig(
+            filename="/var/log/frr/frr-reload.log",
+            format="%(asctime)s %(levelname)5s: %(message)s",
+        )

     # argparse should prevent this from happening but just to be safe...
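
For reference, the argparse guard that comment relies on, in miniature (a standalone sketch, not part of frr-reload.py):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--reload", action="store_true")
group.add_argument("--test", action="store_true")

# argparse exits with an error before any main code runs:
#   parser.parse_args([])                       -> one of --reload/--test is required
#   parser.parse_args(["--reload", "--test"])   -> --test not allowed with --reload
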
else: - raise Exception('Must specify --reload or --test') + raise Exception("Must specify --reload or --test") log = logging.getLogger(__name__) if args.debug: @@ -1269,40 +1473,59 @@ if __name__ == '__main__': sys.exit(1) # Verify that bindir is correct - if not os.path.isdir(args.bindir) or not os.path.isfile(args.bindir + '/vtysh'): + if not os.path.isdir(args.bindir) or not os.path.isfile(args.bindir + "/vtysh"): log.error("Bindir %s is not a valid path to vtysh" % args.bindir) sys.exit(1) # verify that the vty_socket, if specified, is valid if args.vty_socket and not os.path.isdir(args.vty_socket): - log.error('vty_socket %s is not a valid path' % args.vty_socket) + log.error("vty_socket %s is not a valid path" % args.vty_socket) sys.exit(1) # verify that the daemon, if specified, is valid - if args.daemon and args.daemon not in ['zebra', 'bgpd', 'fabricd', 'isisd', 'ospf6d', 'ospfd', 'pbrd', 'pimd', 'ripd', 'ripngd', 'sharpd', 'staticd', 'vrrpd', 'ldpd']: - log.error("Daemon %s is not a valid option for 'show running-config'" % args.daemon) + if args.daemon and args.daemon not in [ + "zebra", + "bgpd", + "fabricd", + "isisd", + "ospf6d", + "ospfd", + "pbrd", + "pimd", + "ripd", + "ripngd", + "sharpd", + "staticd", + "vrrpd", + "ldpd", + ]: + log.error( + "Daemon %s is not a valid option for 'show running-config'" % args.daemon + ) sys.exit(1) vtysh = Vtysh(args.bindir, args.confdir, args.vty_socket, args.pathspace) # Verify that 'service integrated-vtysh-config' is configured if args.pathspace: - vtysh_filename = args.confdir + '/' + args.pathspace + '/vtysh.conf' + vtysh_filename = args.confdir + "/" + args.pathspace + "/vtysh.conf" else: - vtysh_filename = args.confdir + '/vtysh.conf' + vtysh_filename = args.confdir + "/vtysh.conf" service_integrated_vtysh_config = True if os.path.isfile(vtysh_filename): - with open(vtysh_filename, 'r') as fh: + with open(vtysh_filename, "r") as fh: for line in fh.readlines(): line = line.strip() - if line == 'no service integrated-vtysh-config': + if line == "no service integrated-vtysh-config": service_integrated_vtysh_config = False break if not service_integrated_vtysh_config and not args.daemon: - log.error("'service integrated-vtysh-config' is not configured, this is required for 'service frr reload'") + log.error( + "'service integrated-vtysh-config' is not configured, this is required for 'service frr reload'" + ) sys.exit(1) log.info('Called via "%s"', str(args)) @@ -1335,10 +1558,10 @@ if __name__ == '__main__': for (ctx_keys, line) in lines_to_del: - if line == '!': + if line == "!": continue - cmd = '\n'.join(lines_to_config(ctx_keys, line, True)) + cmd = "\n".join(lines_to_config(ctx_keys, line, True)) lines_to_configure.append(cmd) print(cmd) @@ -1348,10 +1571,10 @@ if __name__ == '__main__': for (ctx_keys, line) in lines_to_add: - if line == '!': + if line == "!": continue - cmd = '\n'.join(lines_to_config(ctx_keys, line, False)) + cmd = "\n".join(lines_to_config(ctx_keys, line, False)) lines_to_configure.append(cmd) print(cmd) @@ -1361,7 +1584,7 @@ if __name__ == '__main__': if not vtysh.is_config_available(): sys.exit(1) - log.debug('New Frr Config\n%s', newconf.get_lines()) + log.debug("New Frr Config\n%s", newconf.get_lines()) # This looks a little odd but we have to do this twice...here is why # If the user had this running bgp config: @@ -1403,7 +1626,7 @@ if __name__ == '__main__': for x in range(2): running = Config(vtysh) running.load_from_show_running(args.daemon) - log.debug('Running Frr Config (Pass #%d)\n%s', x, 
running.get_lines()) + log.debug("Running Frr Config (Pass #%d)\n%s", x, running.get_lines()) (lines_to_add, lines_to_del) = compare_context_objects(newconf, running) @@ -1428,7 +1651,7 @@ if __name__ == '__main__': if lines_to_del and x == 0: for (ctx_keys, line) in lines_to_del: - if line == '!': + if line == "!": continue # 'no' commands are tricky, we can't just put them in a file and @@ -1453,7 +1676,7 @@ if __name__ == '__main__': while True: try: - vtysh(['configure'] + cmd) + vtysh(["configure"] + cmd) except VtyshException: @@ -1461,17 +1684,20 @@ if __name__ == '__main__': # 'no ip ospf authentication message-digest 1.1.1.1' in # our example above # - Split that last entry by whitespace and drop the last word - log.info('Failed to execute %s', ' '.join(cmd)) - last_arg = cmd[-1].split(' ') + log.info("Failed to execute %s", " ".join(cmd)) + last_arg = cmd[-1].split(" ") if len(last_arg) <= 2: - log.error('"%s" we failed to remove this command', ' -- '.join(original_cmd)) + log.error( + '"%s" we failed to remove this command', + " -- ".join(original_cmd), + ) break new_last_arg = last_arg[0:-1] - cmd[-1] = ' '.join(new_last_arg) + cmd[-1] = " ".join(new_last_arg) else: - log.info('Executed "%s"', ' '.join(cmd)) + log.info('Executed "%s"', " ".join(cmd)) break if lines_to_add: @@ -1479,28 +1705,31 @@ if __name__ == '__main__': for (ctx_keys, line) in lines_to_add: - if line == '!': + if line == "!": continue # Don't run "no" commands twice since they can error # out the second time due to first deletion - if x == 1 and ctx_keys[0].startswith('no '): + if x == 1 and ctx_keys[0].startswith("no "): continue - cmd = '\n'.join(lines_to_config(ctx_keys, line, False)) + '\n' + cmd = "\n".join(lines_to_config(ctx_keys, line, False)) + "\n" lines_to_configure.append(cmd) if lines_to_configure: - random_string = ''.join(random.SystemRandom().choice( - string.ascii_uppercase + - string.digits) for _ in range(6)) + random_string = "".join( + random.SystemRandom().choice( + string.ascii_uppercase + string.digits + ) + for _ in range(6) + ) filename = args.rundir + "/reload-%s.txt" % random_string log.info("%s content\n%s" % (filename, pformat(lines_to_configure))) - with open(filename, 'w') as fh: + with open(filename, "w") as fh: for line in lines_to_configure: - fh.write(line + '\n') + fh.write(line + "\n") try: vtysh.exec_file(filename) @@ -1510,9 +1739,9 @@ if __name__ == '__main__': os.unlink(filename) # Make these changes persistent - target = str(args.confdir + '/frr.conf') + target = str(args.confdir + "/frr.conf") if args.overwrite or (not args.daemon and args.filename != target): - vtysh('write') + vtysh("write") if not reload_ok: sys.exit(1) diff --git a/tools/gcc-plugins/format-test.py b/tools/gcc-plugins/format-test.py index df2437d5bc..ddf71aa0ef 100644 --- a/tools/gcc-plugins/format-test.py +++ b/tools/gcc-plugins/format-test.py @@ -4,58 +4,71 @@ import shlex import os import re -os.environ['LC_ALL'] = 'C' -os.environ['LANG'] = 'C' +os.environ["LC_ALL"] = "C" +os.environ["LANG"] = "C" for k in list(os.environ.keys()): - if k.startswith('LC_'): + if k.startswith("LC_"): os.environ.pop(k) if len(sys.argv) < 2: - sys.stderr.write('start as format-test.py gcc-123.45 [-options ...]\n') + sys.stderr.write("start as format-test.py gcc-123.45 [-options ...]\n") sys.exit(1) -c_re = re.compile(r'//\s+(NO)?WARN') +c_re = re.compile(r"//\s+(NO)?WARN") expect = {} lines = {} -with open('format-test.c', 'r') as fd: +with open("format-test.c", "r") as fd: for lno, line in 
enumerate(fd.readlines(), 1): lines[lno] = line.strip() m = c_re.search(line) if m is None: continue if m.group(1) is None: - expect[lno] = 'warn' + expect[lno] = "warn" else: - expect[lno] = 'nowarn' + expect[lno] = "nowarn" -cmd = shlex.split('-Wall -Wextra -Wno-unused -fplugin=./frr-format.so -fno-diagnostics-show-caret -c -o format-test.o format-test.c') +cmd = shlex.split( + "-Wall -Wextra -Wno-unused -fplugin=./frr-format.so -fno-diagnostics-show-caret -c -o format-test.o format-test.c" +) -gcc = subprocess.Popen(sys.argv[1:] + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +gcc = subprocess.Popen( + sys.argv[1:] + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE +) sout, serr = gcc.communicate() gcc.wait() -gcclines = serr.decode('UTF-8').splitlines() -line_re = re.compile(r'^format-test\.c:(\d+):(.*)$') +gcclines = serr.decode("UTF-8").splitlines() +line_re = re.compile(r"^format-test\.c:(\d+):(.*)$") gcc_warns = {} for line in gcclines: - if line.find('In function') >= 0: + if line.find("In function") >= 0: continue m = line_re.match(line) if m is None: - sys.stderr.write('cannot process GCC output: %s\n' % line) + sys.stderr.write("cannot process GCC output: %s\n" % line) continue lno = int(m.group(1)) gcc_warns.setdefault(lno, []).append(line) for lno, val in expect.items(): - if val == 'nowarn' and lno in gcc_warns: - sys.stderr.write('unexpected gcc warning on line %d:\n\t%s\n\t%s\n' % (lno, lines[lno], '\n\t'.join(gcc_warns[lno]))) - if val == 'warn' and lno not in gcc_warns: - sys.stderr.write('expected warning on line %d but did not get one\n\t%s\n' % (lno, lines[lno])) + if val == "nowarn" and lno in gcc_warns: + sys.stderr.write( + "unexpected gcc warning on line %d:\n\t%s\n\t%s\n" + % (lno, lines[lno], "\n\t".join(gcc_warns[lno])) + ) + if val == "warn" and lno not in gcc_warns: + sys.stderr.write( + "expected warning on line %d but did not get one\n\t%s\n" + % (lno, lines[lno]) + ) leftover = set(gcc_warns.keys()) - set(expect.keys()) for lno in sorted(leftover): - sys.stderr.write('unmarked gcc warning on line %d:\n\t%s\n\t%s\n' % (lno, lines[lno], '\n\t'.join(gcc_warns[lno]))) + sys.stderr.write( + "unmarked gcc warning on line %d:\n\t%s\n\t%s\n" + % (lno, lines[lno], "\n\t".join(gcc_warns[lno])) + ) diff --git a/tools/generate_support_bundle.py b/tools/generate_support_bundle.py index 540b7a1357..ae258bddfe 100755 --- a/tools/generate_support_bundle.py +++ b/tools/generate_support_bundle.py @@ -7,9 +7,9 @@ import os import subprocess import datetime -TOOLS_DIR="tools/" -ETC_DIR="/etc/frr/" -LOG_DIR="/var/log/frr/" +TOOLS_DIR = "tools/" +ETC_DIR = "/etc/frr/" +LOG_DIR = "/var/log/frr/" SUCCESS = 1 FAIL = 0 @@ -17,96 +17,103 @@ inputFile = ETC_DIR + "support_bundle_commands.conf" # Open support bundle configuration file def openConfFile(i_file): - try: - with open(i_file) as supportBundleConfFile: - lines = filter(None, (line.rstrip() for line in supportBundleConfFile)) - return lines - except IOError: - return ([]) + try: + with open(i_file) as supportBundleConfFile: + lines = filter(None, (line.rstrip() for line in supportBundleConfFile)) + return lines + except IOError: + return [] + # Create the output file name def createOutputFile(procName): - fileName = procName + "_support_bundle.log" - oldFile = LOG_DIR + fileName - cpFileCmd = "cp " + oldFile + " " + oldFile + ".prev" - rmFileCmd = "rm -rf " + oldFile - print("Making backup of " + oldFile) - os.system(cpFileCmd) - print("Removing " + oldFile) - os.system(rmFileCmd) - return fileName + fileName = 
procName + "_support_bundle.log" + oldFile = LOG_DIR + fileName + cpFileCmd = "cp " + oldFile + " " + oldFile + ".prev" + rmFileCmd = "rm -rf " + oldFile + print("Making backup of " + oldFile) + os.system(cpFileCmd) + print("Removing " + oldFile) + os.system(rmFileCmd) + return fileName + # Open the output file for this process def openOutputFile(fileName): - crt_file_cmd = LOG_DIR + fileName - print(crt_file_cmd) - try: - outputFile = open(crt_file_cmd, "w") - return outputFile - except IOError: - return () + crt_file_cmd = LOG_DIR + fileName + print(crt_file_cmd) + try: + outputFile = open(crt_file_cmd, "w") + return outputFile + except IOError: + return () + # Close the output file for this process def closeOutputFile(file): - try: - file.close() - return SUCCESS - except IOError: - return FAIL + try: + file.close() + return SUCCESS + except IOError: + return FAIL + # Execute the command over vtysh and store in the # output file def executeCommand(cmd, outputFile): - cmd_exec_str = "vtysh -c \"" + cmd + "\" " - try: - cmd_output = subprocess.check_output(cmd_exec_str, shell=True) + cmd_exec_str = 'vtysh -c "' + cmd + '" ' try: - dateTime = datetime.datetime.now() - outputFile.write(">>[" + str(dateTime) + "]" + cmd + "\n") - outputFile.write(cmd_output) - outputFile.write("########################################################\n") - outputFile.write('\n') - except: - print("Writing to ouptut file Failed") - except subprocess.CalledProcessError as e: - dateTime = datetime.datetime.now() - outputFile.write(">>[" + str(dateTime) + "]" + cmd + "\n") - outputFile.write(e.output) - outputFile.write("########################################################\n") - outputFile.write('\n') - print("Error:" + e.output) + cmd_output = subprocess.check_output(cmd_exec_str, shell=True) + try: + dateTime = datetime.datetime.now() + outputFile.write(">>[" + str(dateTime) + "]" + cmd + "\n") + outputFile.write(cmd_output) + outputFile.write( + "########################################################\n" + ) + outputFile.write("\n") + except: + print("Writing to ouptut file Failed") + except subprocess.CalledProcessError as e: + dateTime = datetime.datetime.now() + outputFile.write(">>[" + str(dateTime) + "]" + cmd + "\n") + outputFile.write(e.output) + outputFile.write("########################################################\n") + outputFile.write("\n") + print("Error:" + e.output) # Process the support bundle configuration file # and call appropriate functions def processConfFile(lines): - for line in lines: - if line[0][0] == '#': - continue - cmd_line = line.split(':') - if cmd_line[0] == "PROC_NAME": - outputFileName = createOutputFile(cmd_line[1]) - if outputFileName: - print(outputFileName, "created for", cmd_line[1]) - elif cmd_line[0] == "CMD_LIST_START": - outputFile = openOutputFile(outputFileName) - if outputFile: - print(outputFileName, "opened") - else: - print(outputFileName, "open failed") - return FAIL - elif cmd_line[0] == "CMD_LIST_END": - if closeOutputFile(outputFile): - print(outputFileName, "closed") - else: - print(outputFileName, "close failed") - else: - print("Execute:" , cmd_line[0]) - executeCommand(cmd_line[0], outputFile) - + for line in lines: + if line[0][0] == "#": + continue + cmd_line = line.split(":") + if cmd_line[0] == "PROC_NAME": + outputFileName = createOutputFile(cmd_line[1]) + if outputFileName: + print(outputFileName, "created for", cmd_line[1]) + elif cmd_line[0] == "CMD_LIST_START": + outputFile = openOutputFile(outputFileName) + if outputFile: + 
print(outputFileName, "opened") + else: + print(outputFileName, "open failed") + return FAIL + elif cmd_line[0] == "CMD_LIST_END": + if closeOutputFile(outputFile): + print(outputFileName, "closed") + else: + print(outputFileName, "close failed") + else: + print("Execute:", cmd_line[0]) + executeCommand(cmd_line[0], outputFile) + + # Main Function lines = openConfFile(inputFile) if not lines: - print("File support_bundle_commands.conf not present in /etc/frr/ directory") + print("File support_bundle_commands.conf not present in /etc/frr/ directory") else: - processConfFile(lines) + processConfFile(lines) diff --git a/tools/git-reindent-branch.py b/tools/git-reindent-branch.py index c207f5946f..5f821b06c7 100644 --- a/tools/git-reindent-branch.py +++ b/tools/git-reindent-branch.py @@ -5,85 +5,100 @@ import sys, os import subprocess, argparse, tempfile import indent + def run(cmd): - proc = subprocess.Popen(cmd, stdout = subprocess.PIPE) - rv = proc.communicate('')[0].decode('UTF-8') + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + rv = proc.communicate("")[0].decode("UTF-8") proc.wait() return rv -clangfmt = run(['git', 'show', 'master:.clang-format']) -argp = argparse.ArgumentParser(description = 'git whitespace-fixing tool') -argp.add_argument('branch', metavar='BRANCH', type = str, nargs = '?', default = 'HEAD') +clangfmt = run(["git", "show", "master:.clang-format"]) + +argp = argparse.ArgumentParser(description="git whitespace-fixing tool") +argp.add_argument("branch", metavar="BRANCH", type=str, nargs="?", default="HEAD") args = argp.parse_args() branch = args.branch -commit = run(['git', 'rev-list', '-n', '1', branch, '--']).strip() +commit = run(["git", "rev-list", "-n", "1", branch, "--"]).strip() # frr-3.1-dev = first commit that is on master but not on stable/3.0 -masterid = run(['git', 'rev-list', '-n', '1', 'frr-3.1-dev', '--']).strip() -masterbase = run(['git', 'merge-base', commit, masterid]).strip() +masterid = run(["git", "rev-list", "-n", "1", "frr-3.1-dev", "--"]).strip() +masterbase = run(["git", "merge-base", commit, masterid]).strip() if masterbase == masterid: - refbranch = 'master' + refbranch = "master" else: - refbranch = '3.0' + refbranch = "3.0" -sys.stderr.write('autodetected base: %s (can be 3.0 or master)\n' % refbranch) +sys.stderr.write("autodetected base: %s (can be 3.0 or master)\n" % refbranch) -beforeid = run(['git', 'rev-list', '-n', '1', 'reindent-%s-before' % refbranch, '--']).strip() -afterid = run(['git', 'rev-list', '-n', '1', 'reindent-%s-after' % refbranch, '--']).strip() +beforeid = run( + ["git", "rev-list", "-n", "1", "reindent-%s-before" % refbranch, "--"] +).strip() +afterid = run( + ["git", "rev-list", "-n", "1", "reindent-%s-after" % refbranch, "--"] +).strip() -beforebase = run(['git', 'merge-base', commit, beforeid]).strip() -afterbase = run(['git', 'merge-base', commit, afterid]).strip() +beforebase = run(["git", "merge-base", commit, beforeid]).strip() +afterbase = run(["git", "merge-base", commit, afterid]).strip() if afterbase == afterid: - sys.stderr.write('this branch was already rebased\n') + sys.stderr.write("this branch was already rebased\n") sys.exit(1) if beforebase != beforeid: - sys.stderr.write('you need to rebase your branch onto the tag "reindent-%s-before"\n' % refbranch) + sys.stderr.write( + 'you need to rebase your branch onto the tag "reindent-%s-before"\n' % refbranch + ) sys.exit(1) -revs = run(['git', 'rev-list', 'reindent-%s-before..%s' % (refbranch, commit)]).strip().split('\n') +revs = ( + run(["git", 
"rev-list", "reindent-%s-before..%s" % (refbranch, commit)]) + .strip() + .split("\n") +) revs.reverse() srcdir = os.getcwd() -tmpdir = tempfile.mkdtemp('frrindent') +tmpdir = tempfile.mkdtemp("frrindent") os.chdir(tmpdir) -sys.stderr.write('using temporary directory %s; %d revisions\n' % (tmpdir, len(revs))) -run(['git', 'clone', '-s', '-b', 'reindent-%s-after' % refbranch, srcdir, 'repo']) -os.chdir('repo') +sys.stderr.write("using temporary directory %s; %d revisions\n" % (tmpdir, len(revs))) +run(["git", "clone", "-s", "-b", "reindent-%s-after" % refbranch, srcdir, "repo"]) +os.chdir("repo") -with open('.clang-format', 'w') as fd: +with open(".clang-format", "w") as fd: fd.write(clangfmt) prev = beforeid for rev in revs: - filestat = run(['git', 'diff', '-z', '--name-status', prev, rev]).rstrip('\0').split('\0') + filestat = ( + run(["git", "diff", "-z", "--name-status", prev, rev]).rstrip("\0").split("\0") + ) changes = zip(filestat[0::2], filestat[1::2]) - sys.stderr.write('%s: %d files\n' % (rev, len(changes))) + sys.stderr.write("%s: %d files\n" % (rev, len(changes))) for typ, name in changes: - if typ == 'D': - run(['git', 'rm', name]) - elif typ in ['A', 'M']: - run(['git', 'checkout', rev, '--', name]) - if name.endswith('.c') or name.endswith('.h'): - for d in ['babeld/', 'ldpd/', 'nhrpd/']: + if typ == "D": + run(["git", "rm", name]) + elif typ in ["A", "M"]: + run(["git", "checkout", rev, "--", name]) + if name.endswith(".c") or name.endswith(".h"): + for d in ["babeld/", "ldpd/", "nhrpd/"]: if name.startswith(d): break else: - sys.stderr.write('\t%s\n' % name) + sys.stderr.write("\t%s\n" % name) indent.wrap_file(name) - run(['git', 'add', name]) + run(["git", "add", name]) - run(['git', 'commit', '-C', rev]) + run(["git", "commit", "-C", rev]) prev = rev -run(['git', 'push', 'origin', 'HEAD:refs/heads/reindented-branch']) +run(["git", "push", "origin", "HEAD:refs/heads/reindented-branch"]) sys.stderr.write('\n\n"reindented-branch" should now be OK.\n') -sys.stderr.write('you could use "git reset --hard reindented-branch" to set your current branch to the reindented output\n') -sys.stderr.write('\033[31;1mplease always double-check the output\033[m\n') - +sys.stderr.write( + 'you could use "git reset --hard reindented-branch" to set your current branch to the reindented output\n' +) +sys.stderr.write("\033[31;1mplease always double-check the output\033[m\n") diff --git a/tools/indent.py b/tools/indent.py index d2c41e1865..61a0fd4454 100755 --- a/tools/indent.py +++ b/tools/indent.py @@ -6,42 +6,47 @@ import sys, re, subprocess, os # find all DEFUNs defun_re = re.compile( - r'^((DEF(UN(|_ATTR|_CMD_(ELEMENT|FUNC_(DECL|TEXT))|_DEPRECATED|_NOSH|_HIDDEN|SH(|_ATTR|_DEPRECATED|_HIDDEN))?|PY|PY_ATTR|PY_HIDDEN)|ALIAS)\s*\(.*?)^(?=\s*\{)', - re.M | re.S) -define_re = re.compile( - r'((^#\s*define[^\n]+[^\\]\n)+)', - re.M | re.S) + r"^((DEF(UN(|_ATTR|_CMD_(ELEMENT|FUNC_(DECL|TEXT))|_DEPRECATED|_NOSH|_HIDDEN|SH(|_ATTR|_DEPRECATED|_HIDDEN))?|PY|PY_ATTR|PY_HIDDEN)|ALIAS)\s*\(.*?)^(?=\s*\{)", + re.M | re.S, +) +define_re = re.compile(r"((^#\s*define[^\n]+[^\\]\n)+)", re.M | re.S) # find clang-format control that we just inserted clean_re = re.compile( - r'^.*/\* \$FRR indent\$ \*/\s*\n\s*/\* clang-format (on|off) \*/\s*\n', - re.M) + r"^.*/\* \$FRR indent\$ \*/\s*\n\s*/\* clang-format (on|off) \*/\s*\n", re.M +) + def wrap_file(fn): - with open(fn, 'r') as fd: + with open(fn, "r") as fd: text = fd.read() - repl = r'/* $FRR indent$ */\n/* clang-format off */\n' + \ - r'\1' + \ - r'/* $FRR 
indent$ */\n/* clang-format on */\n' + repl = ( + r"/* $FRR indent$ */\n/* clang-format off */\n" + + r"\1" + + r"/* $FRR indent$ */\n/* clang-format on */\n" + ) # around each DEFUN, insert an indent-on/off comment text = defun_re.sub(repl, text) text = define_re.sub(repl, text) - ci = subprocess.Popen(['clang-format'], stdin = subprocess.PIPE, stdout = subprocess.PIPE) + ci = subprocess.Popen( + ["clang-format"], stdin=subprocess.PIPE, stdout=subprocess.PIPE + ) stdout, ign = ci.communicate(text) ci.wait() if ci.returncode != 0: - raise IOError('clang-format returned %d' % (ci.returncode)) + raise IOError("clang-format returned %d" % (ci.returncode)) # remove the bits we inserted above - final = clean_re.sub('', stdout) + final = clean_re.sub("", stdout) - tmpname = fn + '.indent' - with open(tmpname, 'w') as ofd: + tmpname = fn + ".indent" + with open(tmpname, "w") as ofd: ofd.write(final) os.rename(tmpname, fn) -if __name__ == '__main__': + +if __name__ == "__main__": for fn in sys.argv[1:]: wrap_file(fn) diff --git a/tools/render_md.py b/tools/render_md.py index 16c4bbe8a3..7636496b62 100644 --- a/tools/render_md.py +++ b/tools/render_md.py @@ -2,7 +2,7 @@ # written 2016 by David Lamparter, placed in Public Domain. import sys, markdown -template = ''' %s -''' +""" -md = markdown.Markdown(extensions=['extra', 'toc']) +md = markdown.Markdown(extensions=["extra", "toc"]) for fn in sys.argv[1:]: - with open(fn, 'r') as ifd: - with open('%s.html' % (fn), 'w') as ofd: - ofd.write((template % (md.convert(ifd.read().decode('UTF-8')))).encode('UTF-8')) + with open(fn, "r") as ifd: + with open("%s.html" % (fn), "w") as ofd: + ofd.write( + (template % (md.convert(ifd.read().decode("UTF-8")))).encode("UTF-8") + ) diff --git a/tools/stringmangle.py b/tools/stringmangle.py index a2eb37336a..1c75c86a0d 100644 --- a/tools/stringmangle.py +++ b/tools/stringmangle.py @@ -6,23 +6,24 @@ import re import argparse wrap_res = [ - (re.compile(r'(? 
0: - sys.stderr.write('changed: %s\n' % fn) - with open(fn + '.new', 'w') as ofd: + sys.stderr.write("changed: %s\n" % fn) + with open(fn + ".new", "w") as ofd: ofd.write(newdata) - os.rename(fn + '.new', fn) + os.rename(fn + ".new", fn) l += 1 - sys.stderr.write('%d files changed.\n' % (l)) + sys.stderr.write("%d files changed.\n" % (l)) + main() diff --git a/tools/symalyzer.py b/tools/symalyzer.py index cff21f9f93..ce0bfde0a5 100755 --- a/tools/symalyzer.py +++ b/tools/symalyzer.py @@ -21,18 +21,40 @@ import sys, os, subprocess import re from collections import namedtuple -sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'python')) +sys.path.insert( + 0, + os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "python"), +) from makevars import MakeVars -SymRowBase = namedtuple('SymRow', ['target', 'object', 'name', 'address', 'klass', 'typ', 'size', 'line', 'section', 'loc']) +SymRowBase = namedtuple( + "SymRow", + [ + "target", + "object", + "name", + "address", + "klass", + "typ", + "size", + "line", + "section", + "loc", + ], +) + + class SymRow(SymRowBase): - ''' + """ wrapper around a line of `nm` output - ''' - lib_re = re.compile(r'/lib[^/]+\.(so|la)$') + """ + + lib_re = re.compile(r"/lib[^/]+\.(so|la)$") + def is_global(self): - return self.klass.isupper() or self.klass in 'uvw' + return self.klass.isupper() or self.klass in "uvw" + def scope(self): if self.lib_re.search(self.target) is None: return self.target @@ -40,28 +62,29 @@ class SymRow(SymRowBase): return None def is_export(self): - ''' + """ FRR-specific list of symbols which are considered "externally used" e.g. hooks are by design APIs for external use, same for qobj_t_* frr_inet_ntop is here because it's used through an ELF alias to "inet_ntop()" - ''' - if self.name in ['main', 'frr_inet_ntop', '_libfrr_version']: + """ + if self.name in ["main", "frr_inet_ntop", "_libfrr_version"]: return True - if self.name.startswith('_hook_'): + if self.name.startswith("_hook_"): return True - if self.name.startswith('qobj_t_'): + if self.name.startswith("qobj_t_"): return True return False -class Symbols(dict): - ''' - dict of all symbols in all libs & executables - ''' - from_re = re.compile(r'^Symbols from (.*?):$') - lt_re = re.compile(r'^(.*/)([^/]+)\.l[oa]$') +class Symbols(dict): + """ + dict of all symbols in all libs & executables + """ + + from_re = re.compile(r"^Symbols from (.*?):$") + lt_re = re.compile(r"^(.*/)([^/]+)\.l[oa]$") def __init__(self): super().__init__() @@ -69,26 +92,35 @@ class Symbols(dict): class ReportSym(object): def __init__(self, sym): self.sym = sym + def __repr__(self): - return '<%-25s %-40s [%s]>' % (self.__class__.__name__ + ':', self.sym.name, self.sym.loc) + return "<%-25s %-40s [%s]>" % ( + self.__class__.__name__ + ":", + self.sym.name, + self.sym.loc, + ) + def __lt__(self, other): return self.sym.name.__lt__(other.sym.name) class ReportSymCouldBeStaticAlreadyLocal(ReportSym): - idshort = 'Z' - idlong = 'extrastatic' + idshort = "Z" + idlong = "extrastatic" title = "symbol is local to library, but only used in its source file (make static?)" + class ReportSymCouldBeStatic(ReportSym): - idshort = 'S' - idlong = 'static' + idshort = "S" + idlong = "static" title = "symbol is only used in its source file (make static?)" + class ReportSymCouldBeLibLocal(ReportSym): - idshort = 'L' - idlong = 'liblocal' + idshort = "L" + idlong = "liblocal" title = "symbol is only used inside of library" + class ReportSymModuleAPI(ReportSym): - 
idshort = 'A' - idlong = 'api' + idshort = "A" + idlong = "api" title = "symbol (in executable) is referenced externally from a module" class Symbol(object): @@ -100,31 +132,38 @@ class Symbols(dict): def process(self, row): scope = row.scope() - if row.section == '*UND*': + if row.section == "*UND*": self.refs.append(row) else: self.defs.setdefault(scope, []).append(row) def evaluate(self, out): - ''' + """ generate output report invoked after all object files have been read in, so it can look at inter-object-file relationships - ''' + """ if len(self.defs) == 0: out.extsyms.add(self.name) return for scopename, symdefs in self.defs.items(): - common_defs = [symdef for symdef in symdefs if symdef.section == '*COM*'] - proper_defs = [symdef for symdef in symdefs if symdef.section != '*COM*'] + common_defs = [ + symdef for symdef in symdefs if symdef.section == "*COM*" + ] + proper_defs = [ + symdef for symdef in symdefs if symdef.section != "*COM*" + ] if len(proper_defs) > 1: - print(self.name, ' DUPLICATE') - print('\tD: %s %s' % (scopename, '\n\t\t'.join([repr(s) for s in symdefs]))) + print(self.name, " DUPLICATE") + print( + "\tD: %s %s" + % (scopename, "\n\t\t".join([repr(s) for s in symdefs])) + ) for syms in self.refs: - print('\tR: %s' % (syms, )) + print("\tR: %s" % (syms,)) return if len(proper_defs): @@ -140,7 +179,9 @@ class Symbols(dict): if scopename is not None and len(self.refs) > 0: for ref in self.refs: - if ref.target != primary_def.target and ref.target.endswith('.la'): + if ref.target != primary_def.target and ref.target.endswith( + ".la" + ): outobj = out.report.setdefault(primary_def.object, []) outobj.append(out.ReportSymModuleAPI(primary_def)) break @@ -152,7 +193,9 @@ class Symbols(dict): if primary_def.visible: outobj.append(out.ReportSymCouldBeStatic(primary_def)) else: - outobj.append(out.ReportSymCouldBeStaticAlreadyLocal(primary_def)) + outobj.append( + out.ReportSymCouldBeStaticAlreadyLocal(primary_def) + ) continue if scopename is None and primary_def.visible: @@ -164,7 +207,6 @@ class Symbols(dict): outobj = out.report.setdefault(primary_def.object, []) outobj.append(out.ReportSymCouldBeLibLocal(primary_def)) - def evaluate(self): self.extsyms = set() self.report = {} @@ -177,14 +219,14 @@ class Symbols(dict): m = self.lt_re.match(fn) if m is None: return fn - return m.group(1) + '.libs/' + m.group(2) + '.o' + return m.group(1) + ".libs/" + m.group(2) + ".o" def libtooltargetmustdie(fn): m = self.lt_re.match(fn) if m is None: - a, b = fn.rsplit('/', 1) - return '%s/.libs/%s' % (a, b) - return m.group(1) + '.libs/' + m.group(2) + '.so' + a, b = fn.rsplit("/", 1) + return "%s/.libs/%s" % (a, b) + return m.group(1) + ".libs/" + m.group(2) + ".so" files = list(set([libtoolmustdie(fn) for fn in files])) @@ -192,30 +234,30 @@ class Symbols(dict): filename = None path_rel_to = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - for line in text.split('\n'): - if line.strip() == '': + for line in text.split("\n"): + if line.strip() == "": continue m = self.from_re.match(line) if m is not None: filename = m.group(1) continue - if line.startswith('Name'): + if line.startswith("Name"): continue - items = [i.strip() for i in line.split('|')] + items = [i.strip() for i in line.split("|")] loc = None - if '\t' in items[-1]: - items[-1], loc = items[-1].split('\t', 1) - fn, lno = loc.rsplit(':', 1) + if "\t" in items[-1]: + items[-1], loc = items[-1].split("\t", 1) + fn, lno = loc.rsplit(":", 1) fn = os.path.relpath(fn, path_rel_to) - loc = '%s:%s' % (fn, lno) 
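
To make the field positions concrete, here is one hand-written sample of an `nm -l -g -f sysv` row as this parser sees it (values are illustrative, not taken from a real build):

sample = (
    "frr_inet_ntop   |0000000000001a40|   T  |"
    "            FUNC|0000000000000120|     |.text\t/src/frr/lib/ntop.c:42"
)
fields = [i.strip() for i in sample.split("|")]
# fields: name, value, class, type, size, line, section; with -l the
# "file:lineno" location is appended to the last field after a tab,
# which is what the split("\t", 1) / rsplit(":", 1) above peel apart.
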
+ loc = "%s:%s" % (fn, lno) - items[1] = int(items[1] if items[1] != '' else '0', 16) - items[4] = int(items[4] if items[4] != '' else '0', 16) + items[1] = int(items[1] if items[1] != "" else "0", 16) + items[4] = int(items[4] if items[4] != "" else "0", 16) items.append(loc) row = SymRow(target, filename, *items) - if row.section == '.group' or row.name == '_GLOBAL_OFFSET_TABLE_': + if row.section == ".group" or row.name == "_GLOBAL_OFFSET_TABLE_": continue if not row.is_global(): continue @@ -230,14 +272,19 @@ class Symbols(dict): # in the linked result (this covers ELF "hidden"/"internal" linkage) libfile = libtooltargetmustdie(target) - nmlib = subprocess.Popen(['nm', '-l', '-g', '--defined-only', '-f', 'sysv', libfile], stdout = subprocess.PIPE) - out = nmlib.communicate()[0].decode('US-ASCII') + nmlib = subprocess.Popen( + ["nm", "-l", "-g", "--defined-only", "-f", "sysv", libfile], + stdout=subprocess.PIPE, + ) + out = nmlib.communicate()[0].decode("US-ASCII") for row in parse_nm_output(out): visible_syms.add(row.name) - nm = subprocess.Popen(['nm', '-l', '-f', 'sysv'] + files, stdout = subprocess.PIPE) - out = nm.communicate()[0].decode('US-ASCII') + nm = subprocess.Popen( + ["nm", "-l", "-f", "sysv"] + files, stdout=subprocess.PIPE + ) + out = nm.communicate()[0].decode("US-ASCII") for row in parse_nm_output(out): row.visible = row.name in visible_syms @@ -249,95 +296,111 @@ def write_html_report(syms): try: import jinja2 except ImportError: - sys.stderr.write('jinja2 could not be imported, not writing HTML report!\n') + sys.stderr.write("jinja2 could not be imported, not writing HTML report!\n") return self_path = os.path.dirname(os.path.abspath(__file__)) jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(self_path)) - template = jenv.get_template('symalyzer.html') + template = jenv.get_template("symalyzer.html") dirgroups = {} for fn, reports in syms.report.items(): - dirname, filename = fn.replace('.libs/', '').rsplit('/', 1) + dirname, filename = fn.replace(".libs/", "").rsplit("/", 1) dirgroups.setdefault(dirname, {})[fn] = reports klasses = { - 'T': 'code / plain old regular function (Text)', - 'D': 'global variable, read-write, with nonzero initializer (Data)', - 'B': 'global variable, read-write, with zero initializer (BSS)', - 'C': 'global variable, read-write, with zero initializer (Common)', - 'R': 'global variable, read-only (Rodata)', + "T": "code / plain old regular function (Text)", + "D": "global variable, read-write, with nonzero initializer (Data)", + "B": "global variable, read-write, with zero initializer (BSS)", + "C": "global variable, read-write, with zero initializer (Common)", + "R": "global variable, read-only (Rodata)", } - with open('symalyzer_report.html.tmp', 'w') as fd: - fd.write(template.render(dirgroups = dirgroups, klasses = klasses)) - os.rename('symalyzer_report.html.tmp', 'symalyzer_report.html') + with open("symalyzer_report.html.tmp", "w") as fd: + fd.write(template.render(dirgroups=dirgroups, klasses=klasses)) + os.rename("symalyzer_report.html.tmp", "symalyzer_report.html") - if not os.path.exists('jquery-3.4.1.min.js'): - url = 'https://code.jquery.com/jquery-3.4.1.min.js' + if not os.path.exists("jquery-3.4.1.min.js"): + url = "https://code.jquery.com/jquery-3.4.1.min.js" sys.stderr.write( - 'trying to grab a copy of jquery from %s\nif this fails, please get it manually (the HTML output is done.)\n' % (url)) + "trying to grab a copy of jquery from %s\nif this fails, please get it manually (the HTML output is done.)\n" + % (url) + 
) import requests - r = requests.get('https://code.jquery.com/jquery-3.4.1.min.js') + + r = requests.get("https://code.jquery.com/jquery-3.4.1.min.js") if r.status_code != 200: - sys.stderr.write('failed -- please download jquery-3.4.1.min.js and put it next to the HTML report\n') + sys.stderr.write( + "failed -- please download jquery-3.4.1.min.js and put it next to the HTML report\n" + ) else: - with open('jquery-3.4.1.min.js.tmp', 'w') as fd: + with open("jquery-3.4.1.min.js.tmp", "w") as fd: fd.write(r.text) - os.rename('jquery-3.4.1.min.js.tmp', 'jquery-3.4.1.min.js') - sys.stderr.write('done.\n') + os.rename("jquery-3.4.1.min.js.tmp", "jquery-3.4.1.min.js") + sys.stderr.write("done.\n") + def automake_escape(s): - return s.replace('.', '_').replace('/', '_') + return s.replace(".", "_").replace("/", "_") -if __name__ == '__main__': + +if __name__ == "__main__": mv = MakeVars() - if not (os.path.exists('config.version') and os.path.exists('lib/.libs/libfrr.so')): - sys.stderr.write('please execute this script in the root directory of an FRR build tree\n') - sys.stderr.write('./configure && make need to have completed successfully\n') + if not (os.path.exists("config.version") and os.path.exists("lib/.libs/libfrr.so")): + sys.stderr.write( + "please execute this script in the root directory of an FRR build tree\n" + ) + sys.stderr.write("./configure && make need to have completed successfully\n") sys.exit(1) - amtargets = ['bin_PROGRAMS', 'sbin_PROGRAMS', 'lib_LTLIBRARIES', 'module_LTLIBRARIES'] + amtargets = [ + "bin_PROGRAMS", + "sbin_PROGRAMS", + "lib_LTLIBRARIES", + "module_LTLIBRARIES", + ] targets = [] mv.getvars(amtargets) for amtarget in amtargets: - targets.extend([item for item in mv[amtarget].strip().split() if item != 'tools/ssd']) + targets.extend( + [item for item in mv[amtarget].strip().split() if item != "tools/ssd"] + ) - mv.getvars(['%s_LDADD' % automake_escape(t) for t in targets]) + mv.getvars(["%s_LDADD" % automake_escape(t) for t in targets]) ldobjs = targets[:] for t in targets: - ldadd = mv['%s_LDADD' % automake_escape(t)].strip().split() + ldadd = mv["%s_LDADD" % automake_escape(t)].strip().split() for item in ldadd: - if item.startswith('-'): + if item.startswith("-"): continue - if item.endswith('.a'): + if item.endswith(".a"): ldobjs.append(item) - mv.getvars(['%s_OBJECTS' % automake_escape(o) for o in ldobjs]) + mv.getvars(["%s_OBJECTS" % automake_escape(o) for o in ldobjs]) syms = Symbols() for t in targets: - objs = mv['%s_OBJECTS' % automake_escape(t)].strip().split() - ldadd = mv['%s_LDADD' % automake_escape(t)].strip().split() + objs = mv["%s_OBJECTS" % automake_escape(t)].strip().split() + ldadd = mv["%s_LDADD" % automake_escape(t)].strip().split() for item in ldadd: - if item.startswith('-'): + if item.startswith("-"): continue - if item.endswith('.a'): - objs.extend(mv['%s_OBJECTS' % automake_escape(item)].strip().split()) + if item.endswith(".a"): + objs.extend(mv["%s_OBJECTS" % automake_escape(item)].strip().split()) - sys.stderr.write('processing %s...\n' % t) + sys.stderr.write("processing %s...\n" % t) sys.stderr.flush() - #print(t, '\n\t', objs) + # print(t, '\n\t', objs) syms.load(t, objs) syms.evaluate() for obj, reports in sorted(syms.report.items()): - print('%s:' % obj) + print("%s:" % obj) for report in reports: - print('\t%r' % report) + print("\t%r" % report) write_html_report(syms) diff --git a/yang/embedmodel.py b/yang/embedmodel.py index 0a25c93da7..39bf2bb922 100644 --- a/yang/embedmodel.py +++ b/yang/embedmodel.py @@ -19,13 +19,13 
@@ if not os.path.isdir(outdir):
 # or python-yang. Cross-compiling FRR is already somewhat involved, no need
 # to make it even harder.

-re_name = re.compile(r'\bmodule\s+([^\s]+)\s+\{')
-re_subname = re.compile(r'\bsubmodule\s+([^\s]+)\s+\{')
-re_mainname = re.compile(r'\bbelongs-to\s+([^\s]+)\s+\{')
-re_rev = re.compile(r'\brevision\s+([\d-]+)\s+\{')
+re_name = re.compile(r"\bmodule\s+([^\s]+)\s+\{")
+re_subname = re.compile(r"\bsubmodule\s+([^\s]+)\s+\{")
+re_mainname = re.compile(r"\bbelongs-to\s+([^\s]+)\s+\{")
+re_rev = re.compile(r"\brevision\s+([\d-]+)\s+\{")

-template = '''/* autogenerated by embedmodel.py. DO NOT EDIT */
+template = """/* autogenerated by embedmodel.py. DO NOT EDIT */

 #include <zebra.h>
 #include "yang.h"

@@ -47,23 +47,28 @@ static void embed_register(void)
 {
 \tyang_module_embed(&embed);
 }
-'''
+"""
+
+passchars = set(string.printable) - set("\\'\"%\r\n\t\x0b\x0c")
+

-passchars = set(string.printable) - set('\\\'"%\r\n\t\x0b\x0c')
 def escapech(char):
     if char in passchars:
         return char
-    if char == '\n':
-        return '\\n'
-    if char == '\t':
-        return '\\t'
-    if char in '"\\\'':
-        return '\\' + char
-    return '\\x%02x' % (ord(char))
-def escape(line):
-    return ''.join([escapech(i) for i in line])
+    if char == "\n":
+        return "\\n"
+    if char == "\t":
+        return "\\t"
+    if char in "\"\\'":
+        return "\\" + char
+    return "\\x%02x" % (ord(char))

-with open(inname, 'r') as fd:
+
+def escape(line):
+    return "".join([escapech(i) for i in line])
+
+
+with open(inname, "r") as fd:
     data = fd.read()

 sub_name = ""
@@ -72,29 +77,33 @@ sub_rev = ""

 # XML support isn't actively used currently, but it's here in case the need
 # arises.  It does avoid the regex'ing.
-if '
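
As a quick sanity check of the escapech()/escape() helpers above (assuming the definitions exactly as shown; note that '%' is deliberately excluded from passchars, so it is hex-escaped as well):

assert escape("a\tb\n") == "a\\tb\\n"        # tab/newline become \t and \n
assert escape('say "hi"') == 'say \\"hi\\"'  # quotes gain a backslash
assert escape("100%") == "100\\x25"          # '%' falls through to the hex escape
assert escape("\x07") == "\\x07"             # other non-printables likewise
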