*: reformat python files

We are now using black.

Signed-off-by: Quentin Young <qlyoung@nvidia.com>

commit 701a01920e
parent bd407b54d2
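For context, black normalizes string literals to double quotes, collapses
multi-line expressions that fit within its line-length limit, and explodes
ones that do not into one element per line with a trailing comma. A
before/after sketch taken from the hunks below (the exact invocation is an
assumption; black is typically run as `black <path>`):

    # before
    exclude_patterns = ['_build', 'building-libyang.rst', 'topotests-snippets.rst', 'include-compile.rst']

    # after black
    exclude_patterns = [
        "_build",
        "building-libyang.rst",
        "topotests-snippets.rst",
        "include-compile.rst",
    ]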
@@ -21,48 +21,48 @@ from sphinx.highlighting import lexers
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))

 # -- General configuration ------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.0'
+needs_sphinx = "1.0"

 # prolog for various variable substitutions
-rst_prolog = ''
+rst_prolog = ""

 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.graphviz']
+extensions = ["sphinx.ext.todo", "sphinx.ext.graphviz"]

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst']
-source_suffix = '.rst'
+source_suffix = ".rst"

 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'

 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"

 # General information about the project.
-project = u'FRR'
-copyright = u'2017, FRR'
-author = u'FRR authors'
+project = u"FRR"
+copyright = u"2017, FRR"
+author = u"FRR authors"

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.

 # The short X.Y version.
-version = u'?.?'
+version = u"?.?"
 # The full version, including alpha/beta/rc tags.
-release = u'?.?-?'
+release = u"?.?-?"


 # -----------------------------------------------------------------------------
@@ -72,48 +72,49 @@ release = u'?.?-?'
 # Various installation prefixes. Values are extracted from config.status.
 # Reasonable defaults are set in case that file does not exist.
 replace_vars = {
-    'AUTHORS': author,
-    'COPYRIGHT_YEAR': '1999-2005',
-    'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
-    'PACKAGE_NAME': project.lower(),
-    'PACKAGE_TARNAME': project.lower(),
-    'PACKAGE_STRING': project.lower() + ' latest',
-    'PACKAGE_URL': 'https://frrouting.org/',
-    'PACKAGE_VERSION': 'latest',
-    'INSTALL_PREFIX_ETC': '/etc/frr',
-    'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
-    'INSTALL_PREFIX_STATE': '/var/run/frr',
-    'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
-    'INSTALL_USER': 'frr',
-    'INSTALL_GROUP': 'frr',
-    'INSTALL_VTY_GROUP': 'frrvty',
-    'GROUP': 'frr',
-    'USER': 'frr',
+    "AUTHORS": author,
+    "COPYRIGHT_YEAR": "1999-2005",
+    "COPYRIGHT_STR": "Copyright (c) 1999-2005",
+    "PACKAGE_NAME": project.lower(),
+    "PACKAGE_TARNAME": project.lower(),
+    "PACKAGE_STRING": project.lower() + " latest",
+    "PACKAGE_URL": "https://frrouting.org/",
+    "PACKAGE_VERSION": "latest",
+    "INSTALL_PREFIX_ETC": "/etc/frr",
+    "INSTALL_PREFIX_SBIN": "/usr/lib/frr",
+    "INSTALL_PREFIX_STATE": "/var/run/frr",
+    "INSTALL_PREFIX_MODULES": "/usr/lib/frr/modules",
+    "INSTALL_USER": "frr",
+    "INSTALL_GROUP": "frr",
+    "INSTALL_VTY_GROUP": "frrvty",
+    "GROUP": "frr",
+    "USER": "frr",
 }

 # extract version information, installation location, other stuff we need to
 # use when building final documents
 val = re.compile('^S\["([^"]+)"\]="(.*)"$')
 try:
-    with open('../../config.status', 'r') as cfgstatus:
+    with open("../../config.status", "r") as cfgstatus:
         for ln in cfgstatus.readlines():
             m = val.match(ln)
-            if not m or m.group(1) not in replace_vars.keys(): continue
+            if not m or m.group(1) not in replace_vars.keys():
+                continue
             replace_vars[m.group(1)] = m.group(2)
 except IOError:
     # if config.status doesn't exist, just ignore it
     pass

 # manually fill out some of these we can't get from config.status
-replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
-release = replace_vars['PACKAGE_VERSION']
-version = release.split('-')[0]
+replace_vars["COPYRIGHT_STR"] = "Copyright (c)"
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["COPYRIGHT_YEAR"])
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["AUTHORS"])
+release = replace_vars["PACKAGE_VERSION"]
+version = release.split("-")[0]

 # add substitutions to prolog
 for key, value in replace_vars.items():
-    rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
+    rst_prolog += ".. |{0}| replace:: {1}\n".format(key, value)


 # The language for content autogenerated by Sphinx. Refer to documentation
@@ -125,37 +126,42 @@ language = None

 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'building-libyang.rst', 'topotests-snippets.rst', 'include-compile.rst']
+exclude_patterns = [
+    "_build",
+    "building-libyang.rst",
+    "topotests-snippets.rst",
+    "include-compile.rst",
+]

 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None

 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True

 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True

 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False

 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"

 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []

 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False

 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -165,165 +171,158 @@ todo_include_todos = True

 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+html_theme = "default"

 try:
     import sphinx_rtd_theme

-    html_theme = 'sphinx_rtd_theme'
+    html_theme = "sphinx_rtd_theme"
 except ImportError:
     pass

 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {
+# html_theme_options = {
 #    'sidebarbgcolor': '#374249'
-#}
+# }

 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []

 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None

 # A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = '../figures/frr-icon.svg'
+html_logo = "../figures/frr-icon.svg"

 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = '../figures/frr-logo-icon.png'
+html_favicon = "../figures/frr-logo-icon.png"

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]

 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []

 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'

 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True

 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}

 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}

 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True

 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True

 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False

 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True

 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True

 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True

 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it. The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''

 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None

 # Language to be used for generating the HTML full-text search index.
 # Sphinx supports the following languages:
 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'

 # A dictionary with options for the search language support, empty by default.
 # Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}

 # The name of a javascript file (relative to the configuration directory) that
 # implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'FRRdoc'
+htmlhelp_basename = "FRRdoc"

 # -- Options for LaTeX output ---------------------------------------------

 latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    #'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    #'preamble': '',
-
-    # Latex figure (float) alignment
-    #'figure_align': 'htbp',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+    # Latex figure (float) alignment
+    #'figure_align': 'htbp',
 }

 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'FRR.tex', u"FRR Developer's Manual",
-     u'FRR', 'manual'),
+    (master_doc, "FRR.tex", u"FRR Developer's Manual", u"FRR", "manual"),
 ]

 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-latex_logo = '../figures/frr-logo-medium.png'
+latex_logo = "../figures/frr-logo-medium.png"

 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False

 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False

 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False

 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []

 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True


 # -- Options for manual page output ---------------------------------------

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'frr', u"FRR Developer's Manual",
-     [author], 1)
-]
+man_pages = [(master_doc, "frr", u"FRR Developer's Manual", [author], 1)]

 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False


 # -- Options for Texinfo output -------------------------------------------
@@ -332,38 +331,44 @@ man_pages = [
 # (source start file, target name, title, author,
 # dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'frr', u"FRR Developer's Manual",
-     author, 'FRR', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "frr",
+        u"FRR Developer's Manual",
+        author,
+        "FRR",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]

 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []

 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True

 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False

 # contents of ../extra/frrlexer.py.
 # This is read here to support VPATH build. Since this section is execfile()'d
 # with the file location, we can safely use a relative path here to save the
 # contents of the lexer file for later use even if our relative path changes
 # due to VPATH.
-with open('../extra/frrlexer.py', 'rb') as lex:
+with open("../extra/frrlexer.py", "rb") as lex:
     frrlexerpy = lex.read()

 # custom extensions here
 def setup(app):
     # object type for FRR CLI commands, can be extended to document parent CLI
     # node later on
-    app.add_object_type('clicmd', 'clicmd')
+    app.add_object_type("clicmd", "clicmd")
     # css overrides for HTML theme
-    app.add_stylesheet('overrides.css')
+    app.add_stylesheet("overrides.css")
     # load Pygments lexer for FRR config syntax
     #
     # NB: in Pygments 2.2+ this can be done with `load_lexer_from_file`, but we
@@ -373,4 +378,4 @@ def setup(app):
     # frrlexer = pygments.lexers.load_lexer_from_file('../extra/frrlexer.py', lexername="FRRLexer")
     custom_namespace = {}
     exec(frrlexerpy, custom_namespace)
-    lexers['frr'] = custom_namespace['FRRLexer']()
+    lexers["frr"] = custom_namespace["FRRLexer"]()
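For reference, the config.status parsing in the conf.py hunks above matches
the shell-variable lines that autoconf writes. A minimal hand-written sample
(the values here are hypothetical, not taken from a real FRR build):

    S["PACKAGE_VERSION"]="8.0"
    S["INSTALL_PREFIX_ETC"]="/etc/frr"

The regex then captures key and value from each such line:

    import re

    val = re.compile(r'^S\["([^"]+)"\]="(.*)"$')
    m = val.match('S["PACKAGE_VERSION"]="8.0"')
    assert m.group(1) == "PACKAGE_VERSION"
    assert m.group(2) == "8.0"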
@@ -22,17 +22,18 @@ class FRRLexer(RegexLexer):
     name = "frr"
     aliases = ["frr"]
     tokens = {
-        'root': [
-            (r'^[ \t]*!.*?\n', Comment.Singleline),
+        "root": [
+            (r"^[ \t]*!.*?\n", Comment.Singleline),
             (r'"(\\\\|\\"|[^"])*"', String.Double),
-            (r'[a-f0-9]*:[a-f0-9]*:[a-f0-9:]*(:\d+\.\d+\.\d+\.\d+)?(/\d+)?',
-             Number),  # IPv6
-            (r'\d+\.\d+\.\d+\.\d+(/\d+)?', Number),  # IPv4
-            (r'^([ \t]*)(no[ \t]+)?([-\w]+)',
-             bygroups(Text, Keyword, Name.Function)),
-            (r'[ \t]+', Text),
-            (r'\n', Text),
-            (r'\d+', Number),
-            (r'\S+', Text),
+            (
+                r"[a-f0-9]*:[a-f0-9]*:[a-f0-9:]*(:\d+\.\d+\.\d+\.\d+)?(/\d+)?",
+                Number,
+            ),  # IPv6
+            (r"\d+\.\d+\.\d+\.\d+(/\d+)?", Number),  # IPv4
+            (r"^([ \t]*)(no[ \t]+)?([-\w]+)", bygroups(Text, Keyword, Name.Function)),
+            (r"[ \t]+", Text),
+            (r"\n", Text),
+            (r"\d+", Number),
+            (r"\S+", Text),
         ],
     }
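A quick way to sanity-check the lexer above is to run it through Pygments
directly; a minimal sketch, assuming frrlexer.py is importable from the
current directory:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    from frrlexer import FRRLexer  # hypothetical import path

    # "router" and "no" hit the keyword rule, "64512" the number rule,
    # and the "!" line the comment rule.
    cfg = "router bgp 64512\n no bgp ebgp-requires-policy\n!\n"
    print(highlight(cfg, FRRLexer(), TerminalFormatter()))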
@@ -19,48 +19,48 @@ import re
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))

 # -- General configuration ------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.0'
+needs_sphinx = "1.0"

 # prolog for various variable substitutions
-rst_prolog = ''
+rst_prolog = ""

 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.todo']
+extensions = ["sphinx.ext.todo"]

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst']
-source_suffix = '.rst'
+source_suffix = ".rst"

 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'

 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"

 # General information about the project.
-project = u'FRR'
-copyright = u'2017, FRR'
-author = u'FRR authors'
+project = u"FRR"
+copyright = u"2017, FRR"
+author = u"FRR authors"

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.

 # The short X.Y version.
-version = u'?.?'
+version = u"?.?"
 # The full version, including alpha/beta/rc tags.
-release = u'?.?-?'
+release = u"?.?-?"


 # -----------------------------------------------------------------------------
@@ -70,48 +70,49 @@ release = u'?.?-?'
 # Various installation prefixes. Values are extracted from config.status.
 # Reasonable defaults are set in case that file does not exist.
 replace_vars = {
-    'AUTHORS': author,
-    'COPYRIGHT_YEAR': '1999-2005',
-    'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
-    'PACKAGE_NAME': project.lower(),
-    'PACKAGE_TARNAME': project.lower(),
-    'PACKAGE_STRING': project.lower() + ' latest',
-    'PACKAGE_URL': 'https://frrouting.org/',
-    'PACKAGE_VERSION': 'latest',
-    'INSTALL_PREFIX_ETC': '/etc/frr',
-    'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
-    'INSTALL_PREFIX_STATE': '/var/run/frr',
-    'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
-    'INSTALL_USER': 'frr',
-    'INSTALL_GROUP': 'frr',
-    'INSTALL_VTY_GROUP': 'frrvty',
-    'GROUP': 'frr',
-    'USER': 'frr',
+    "AUTHORS": author,
+    "COPYRIGHT_YEAR": "1999-2005",
+    "COPYRIGHT_STR": "Copyright (c) 1999-2005",
+    "PACKAGE_NAME": project.lower(),
+    "PACKAGE_TARNAME": project.lower(),
+    "PACKAGE_STRING": project.lower() + " latest",
+    "PACKAGE_URL": "https://frrouting.org/",
+    "PACKAGE_VERSION": "latest",
+    "INSTALL_PREFIX_ETC": "/etc/frr",
+    "INSTALL_PREFIX_SBIN": "/usr/lib/frr",
+    "INSTALL_PREFIX_STATE": "/var/run/frr",
+    "INSTALL_PREFIX_MODULES": "/usr/lib/frr/modules",
+    "INSTALL_USER": "frr",
+    "INSTALL_GROUP": "frr",
+    "INSTALL_VTY_GROUP": "frrvty",
+    "GROUP": "frr",
+    "USER": "frr",
 }

 # extract version information, installation location, other stuff we need to
 # use when building final documents
 val = re.compile('^S\["([^"]+)"\]="(.*)"$')
 try:
-    with open('../../config.status', 'r') as cfgstatus:
+    with open("../../config.status", "r") as cfgstatus:
         for ln in cfgstatus.readlines():
             m = val.match(ln)
-            if not m or m.group(1) not in replace_vars.keys(): continue
+            if not m or m.group(1) not in replace_vars.keys():
+                continue
             replace_vars[m.group(1)] = m.group(2)
 except IOError:
     # if config.status doesn't exist, just ignore it
     pass

 # manually fill out some of these we can't get from config.status
-replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
-replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
-release = replace_vars['PACKAGE_VERSION']
-version = release.split('-')[0]
+replace_vars["COPYRIGHT_STR"] = "Copyright (c)"
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["COPYRIGHT_YEAR"])
+replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["AUTHORS"])
+release = replace_vars["PACKAGE_VERSION"]
+version = release.split("-")[0]

 # add substitutions to prolog
 for key, value in replace_vars.items():
-    rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
+    rst_prolog += ".. |{0}| replace:: {1}\n".format(key, value)


 # The language for content autogenerated by Sphinx. Refer to documentation
@@ -123,37 +124,43 @@ language = None

 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'common-options.rst', 'epilogue.rst', 'defines.rst', 'bfd-options.rst']
+exclude_patterns = [
+    "_build",
+    "common-options.rst",
+    "epilogue.rst",
+    "defines.rst",
+    "bfd-options.rst",
+]

 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None

 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True

 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True

 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False

 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"

 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []

 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False

 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -163,31 +170,31 @@ todo_include_todos = True

 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+html_theme = "default"

 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}

 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []

 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None

 # A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None

 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -197,109 +204,105 @@ html_static_path = []
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []

 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'

 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True

 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}

 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}

 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True

 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True

 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False

 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True

 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True

 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True

 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it. The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''

 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None

 # Language to be used for generating the HTML full-text search index.
 # Sphinx supports the following languages:
 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'

 # A dictionary with options for the search language support, empty by default.
 # Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}

 # The name of a javascript file (relative to the configuration directory) that
 # implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'FRRdoc'
+htmlhelp_basename = "FRRdoc"

 # -- Options for LaTeX output ---------------------------------------------

 latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    #'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    #'preamble': '',
-
-    # Latex figure (float) alignment
-    #'figure_align': 'htbp',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+    # Latex figure (float) alignment
+    #'figure_align': 'htbp',
 }

 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'FRR.tex', u'FRR User Manual',
-     u'FRR', 'manual'),
+    (master_doc, "FRR.tex", u"FRR User Manual", u"FRR", "manual"),
 ]

 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None

 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False

 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False

 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False

 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []

 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True


 # -- Options for manual page output ---------------------------------------
@@ -308,33 +311,45 @@ latex_documents = [
 # (source start file, name, description, authors, manual section).

 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False

 fwfrr = "{0} routing engine for use with FRRouting."

 man_pages = [
-    ('frr-bfdd', 'frr-bfdd', fwfrr.format("a bfd"), [], 8),
-    ('frr-bgpd', 'frr-bgpd', fwfrr.format("a BGPv4, BGPv4+, BGPv4-"), [], 8),
-    ('frr-eigrpd', 'frr-eigrpd', fwfrr.format("an EIGRP"), [], 8),
-    ('frr-fabricd', 'frr-fabricd', fwfrr.format("an OpenFabric"), [], 8),
-    ('frr-isisd', 'frr-isisd', fwfrr.format("an IS-IS"), [], 8),
-    ('frr-ldpd', 'frr-ldpd', fwfrr.format("an LDP"), [], 8),
-    ('frr-nhrpd', 'frr-nhrpd', fwfrr.format("a Next Hop Routing Protocol"), [], 8),
-    ('frr-ospf6d', 'frr-ospf6d', fwfrr.format("an OSPFv3"), [], 8),
-    ('frr-ospfclient', 'frr-ospfclient', 'an example ospf-api client', [], 8),
-    ('frr-ospfd', 'frr-ospfd', fwfrr.format("an OSPFv2"), [], 8),
-    ('frr-pbrd', 'frr-pbrd', fwfrr.format("a PBR"), [], 8),
-    ('frr-pimd', 'frr-pimd', fwfrr.format("a PIM"), [], 8),
-    ('frr-ripd', 'frr-ripd', fwfrr.format("a RIP"), [], 8),
-    ('frr-ripngd', 'frr-ripngd', fwfrr.format("a RIPNG"), [], 8),
-    ('frr-sharpd', 'frr-sharpd', fwfrr.format("a SHARP"), [], 8),
-    ('frr-staticd', 'frr-staticd', fwfrr.format("a static route manager"), [], 8),
-    ('frr-vrrpd', 'frr-vrrpd', fwfrr.format("a VRRP"), [], 8),
-    ('frr-watchfrr', 'frr-watchfrr', 'a program to monitor the status of FRRouting daemons', [], 8),
-    ('frr-zebra', 'frr-zebra', 'a routing manager for use with associated FRRouting components.', [], 8),
-    ('frr', 'frr', 'a systemd interaction script', [], 1),
-    ('mtracebis', 'mtracebis', "a multicast trace client", [], 8),
-    ('vtysh', 'vtysh', 'an integrated shell for FRRouting.', [], 1),
+    ("frr-bfdd", "frr-bfdd", fwfrr.format("a bfd"), [], 8),
+    ("frr-bgpd", "frr-bgpd", fwfrr.format("a BGPv4, BGPv4+, BGPv4-"), [], 8),
+    ("frr-eigrpd", "frr-eigrpd", fwfrr.format("an EIGRP"), [], 8),
+    ("frr-fabricd", "frr-fabricd", fwfrr.format("an OpenFabric"), [], 8),
+    ("frr-isisd", "frr-isisd", fwfrr.format("an IS-IS"), [], 8),
+    ("frr-ldpd", "frr-ldpd", fwfrr.format("an LDP"), [], 8),
+    ("frr-nhrpd", "frr-nhrpd", fwfrr.format("a Next Hop Routing Protocol"), [], 8),
+    ("frr-ospf6d", "frr-ospf6d", fwfrr.format("an OSPFv3"), [], 8),
+    ("frr-ospfclient", "frr-ospfclient", "an example ospf-api client", [], 8),
+    ("frr-ospfd", "frr-ospfd", fwfrr.format("an OSPFv2"), [], 8),
+    ("frr-pbrd", "frr-pbrd", fwfrr.format("a PBR"), [], 8),
+    ("frr-pimd", "frr-pimd", fwfrr.format("a PIM"), [], 8),
+    ("frr-ripd", "frr-ripd", fwfrr.format("a RIP"), [], 8),
+    ("frr-ripngd", "frr-ripngd", fwfrr.format("a RIPNG"), [], 8),
+    ("frr-sharpd", "frr-sharpd", fwfrr.format("a SHARP"), [], 8),
+    ("frr-staticd", "frr-staticd", fwfrr.format("a static route manager"), [], 8),
+    ("frr-vrrpd", "frr-vrrpd", fwfrr.format("a VRRP"), [], 8),
+    (
+        "frr-watchfrr",
+        "frr-watchfrr",
+        "a program to monitor the status of FRRouting daemons",
+        [],
+        8,
+    ),
+    (
+        "frr-zebra",
+        "frr-zebra",
+        "a routing manager for use with associated FRRouting components.",
+        [],
+        8,
+    ),
+    ("frr", "frr", "a systemd interaction script", [], 1),
+    ("mtracebis", "mtracebis", "a multicast trace client", [], 8),
+    ("vtysh", "vtysh", "an integrated shell for FRRouting.", [], 1),
 ]

 # -- Options for Texinfo output -------------------------------------------
@@ -344,15 +359,15 @@ man_pages = [
 # dir menu entry, description, category)

 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []

 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True

 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False

 # custom extensions here
doc/user/conf.py
@ -22,48 +22,48 @@ from sphinx.highlighting import lexers
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
needs_sphinx = '1.0'
|
||||
needs_sphinx = "1.0"
|
||||
|
||||
# prolog for various variable substitutions
|
||||
rst_prolog = ''
|
||||
rst_prolog = ""
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = ['sphinx.ext.todo']
|
||||
extensions = ["sphinx.ext.todo"]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
# source_suffix = ['.rst']
|
||||
source_suffix = '.rst'
|
||||
source_suffix = ".rst"
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
master_doc = "index"
|
||||
|
||||
# General information about the project.
|
||||
project = u'FRR'
|
||||
copyright = u'2017, FRR'
|
||||
author = u'FRR authors'
|
||||
project = u"FRR"
|
||||
copyright = u"2017, FRR"
|
||||
author = u"FRR authors"
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
|
||||
# The short X.Y version.
|
||||
version = u'?.?'
|
||||
version = u"?.?"
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u'?.?-?'
|
||||
release = u"?.?-?"
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
@ -73,48 +73,49 @@ release = u'?.?-?'
|
||||
# Various installation prefixes. Values are extracted from config.status.
|
||||
# Reasonable defaults are set in case that file does not exist.
|
||||
replace_vars = {
|
||||
'AUTHORS': author,
|
||||
'COPYRIGHT_YEAR': '1999-2005',
|
||||
'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
|
||||
'PACKAGE_NAME': project.lower(),
|
||||
'PACKAGE_TARNAME': project.lower(),
|
||||
'PACKAGE_STRING': project.lower() + ' latest',
|
||||
'PACKAGE_URL': 'https://frrouting.org/',
|
||||
'PACKAGE_VERSION': 'latest',
|
||||
'INSTALL_PREFIX_ETC': '/etc/frr',
|
||||
'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
|
||||
'INSTALL_PREFIX_STATE': '/var/run/frr',
|
||||
'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
|
||||
'INSTALL_USER': 'frr',
|
||||
'INSTALL_GROUP': 'frr',
|
||||
'INSTALL_VTY_GROUP': 'frrvty',
|
||||
'GROUP': 'frr',
|
||||
'USER': 'frr',
|
||||
"AUTHORS": author,
|
||||
"COPYRIGHT_YEAR": "1999-2005",
|
||||
"COPYRIGHT_STR": "Copyright (c) 1999-2005",
|
||||
"PACKAGE_NAME": project.lower(),
|
||||
"PACKAGE_TARNAME": project.lower(),
|
||||
"PACKAGE_STRING": project.lower() + " latest",
|
||||
"PACKAGE_URL": "https://frrouting.org/",
|
||||
"PACKAGE_VERSION": "latest",
|
||||
"INSTALL_PREFIX_ETC": "/etc/frr",
|
||||
"INSTALL_PREFIX_SBIN": "/usr/lib/frr",
|
||||
"INSTALL_PREFIX_STATE": "/var/run/frr",
|
||||
"INSTALL_PREFIX_MODULES": "/usr/lib/frr/modules",
|
||||
"INSTALL_USER": "frr",
|
||||
"INSTALL_GROUP": "frr",
|
||||
"INSTALL_VTY_GROUP": "frrvty",
|
||||
"GROUP": "frr",
|
||||
"USER": "frr",
|
||||
}
|
||||
|
||||
# extract version information, installation location, other stuff we need to
|
||||
# use when building final documents
|
||||
val = re.compile('^S\["([^"]+)"\]="(.*)"$')
|
||||
try:
|
||||
with open('../../config.status', 'r') as cfgstatus:
|
||||
with open("../../config.status", "r") as cfgstatus:
|
||||
for ln in cfgstatus.readlines():
|
||||
m = val.match(ln)
|
||||
if not m or m.group(1) not in replace_vars.keys(): continue
|
||||
if not m or m.group(1) not in replace_vars.keys():
|
||||
continue
|
||||
replace_vars[m.group(1)] = m.group(2)
|
||||
except IOError:
|
||||
# if config.status doesn't exist, just ignore it
|
||||
pass
|
||||
|
||||
# manually fill out some of these we can't get from config.status
|
||||
replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
|
||||
replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
|
||||
replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
|
||||
release = replace_vars['PACKAGE_VERSION']
|
||||
version = release.split('-')[0]
|
||||
replace_vars["COPYRIGHT_STR"] = "Copyright (c)"
|
||||
replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["COPYRIGHT_YEAR"])
|
||||
replace_vars["COPYRIGHT_STR"] += " {0}".format(replace_vars["AUTHORS"])
|
||||
release = replace_vars["PACKAGE_VERSION"]
|
||||
version = release.split("-")[0]
|
||||
|
||||
# add substitutions to prolog
|
||||
for key, value in replace_vars.items():
|
||||
rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
|
||||
rst_prolog += ".. |{0}| replace:: {1}\n".format(key, value)
|
||||
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
@ -126,39 +127,45 @@ language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build', 'rpki.rst', 'routeserver.rst',
|
||||
'ospf_fundamentals.rst', 'flowspec.rst', 'snmptrap.rst',
|
||||
'wecmp_linkbw.rst']
|
||||
exclude_patterns = [
|
||||
"_build",
|
||||
"rpki.rst",
|
||||
"routeserver.rst",
|
||||
"ospf_fundamentals.rst",
|
||||
"flowspec.rst",
|
||||
"snmptrap.rst",
|
||||
"wecmp_linkbw.rst",
|
||||
]
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#default_role = None
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
# add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
pygments_style = "sphinx"
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
# modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
#keep_warnings = False
|
||||
# keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = True
|
||||
@ -168,165 +175,158 @@ todo_include_todos = True
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
html_theme = "default"
|
||||
|
||||
try:
|
||||
import sphinx_rtd_theme
|
||||
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme = "sphinx_rtd_theme"
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {
|
||||
# html_theme_options = {
|
||||
# 'sidebarbgcolor': '#374249'
|
||||
#}
|
||||
# }
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
# html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
# html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
html_logo = '../figures/frr-icon.svg'
|
||||
html_logo = "../figures/frr-icon.svg"
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
html_favicon = '../figures/frr-logo-icon.png'
|
||||
html_favicon = "../figures/frr-logo-icon.png"
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
html_static_path = ["_static"]
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#html_extra_path = []
|
||||
# html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
# html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
# html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
# html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
# html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
# html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
# html_file_suffix = None
|
||||
|
||||
# Language to be used for generating the HTML full-text search index.
|
||||
# Sphinx supports the following languages:
|
||||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
|
||||
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
|
||||
#html_search_language = 'en'
|
||||
# html_search_language = 'en'
|
||||
|
||||
# A dictionary with options for the search language support, empty by default.
|
||||
# Now only 'ja' uses this config value
|
||||
#html_search_options = {'type': 'default'}
|
||||
# html_search_options = {'type': 'default'}
|
||||
|
||||
# The name of a javascript file (relative to the configuration directory) that
|
||||
# implements a search results scorer. If empty, the default will be used.
|
||||
#html_search_scorer = 'scorer.js'
|
||||
# html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'FRRdoc'
|
||||
htmlhelp_basename = "FRRdoc"
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'FRR.tex', u'FRR User Manual',
|
||||
u'FRR', 'manual'),
|
||||
(master_doc, "FRR.tex", u"FRR User Manual", u"FRR", "manual"),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
latex_logo = '../figures/frr-logo-medium.png'
|
||||
latex_logo = "../figures/frr-logo-medium.png"
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
#latex_show_urls = False
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'frr', u'FRR User Manual',
     [author], 1)
]
man_pages = [(master_doc, "frr", u"FRR User Manual", [author], 1)]

# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------
@ -335,29 +335,35 @@ man_pages = [
#  (source start file, target name, title, author,
#   dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'frr', u'FRR User Manual',
     author, 'FRR', 'One line description of project.',
     'Miscellaneous'),
    (
        master_doc,
        "frr",
        u"FRR User Manual",
        author,
        "FRR",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# texinfo_no_detailmenu = False

# contents of ../extra/frrlexer.py.
# This is read here to support VPATH build. Since this section is execfile()'d
# with the file location, we can safely use a relative path here to save the
# contents of the lexer file for later use even if our relative path changes
# due to VPATH.
with open('../extra/frrlexer.py', 'rb') as lex:
with open("../extra/frrlexer.py", "rb") as lex:
    frrlexerpy = lex.read()

# Parse version string into int array
@ -365,7 +371,7 @@ def vparse(s):
    a = []

    for c in s:
        if c != '.':
        if c != ".":
            a.append(int(c))

    while len(a) < 3:
@ -373,22 +379,23 @@ def vparse(s):

    return a[:3]


# custom extensions here
def setup(app):
    # object type for FRR CLI commands, can be extended to document parent CLI
    # node later on
    app.add_object_type('clicmd', 'clicmd')
    app.add_object_type("clicmd", "clicmd")

    # css overrides for HTML theme
    # Note sphinx version differences
    sver = vparse(sphinx.__version__)

    if sver < vparse('1.8.0') :
        app.add_stylesheet('overrides.css')
        app.add_javascript('overrides.js')
    if sver < vparse("1.8.0"):
        app.add_stylesheet("overrides.css")
        app.add_javascript("overrides.js")
    else:
        app.add_css_file('overrides.css')
        app.add_js_file('overrides.js')
        app.add_css_file("overrides.css")
        app.add_js_file("overrides.js")

    # load Pygments lexer for FRR config syntax
    #
@ -399,4 +406,4 @@ def setup(app):
    # frrlexer = pygments.lexers.load_lexer_from_file('../extra/frrlexer.py', lexername="FRRLexer")
    custom_namespace = {}
    exec(frrlexerpy, custom_namespace)
    lexers['frr'] = custom_namespace['FRRLexer']()
    lexers["frr"] = custom_namespace["FRRLexer"]()
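A note on the version gate above: vparse() turns the Sphinx version into a list of ints so the 1.8 API cutover can use plain element-wise list comparison. A minimal sketch (illustrative; assumes the loop body elided by the hunk above pads the list with zeros):

    # vparse("1.7.9") -> [1, 7, 9]; vparse("2.4") -> [2, 4, 0]
    # Python compares lists element-wise, so the gate behaves as expected:
    assert vparse("1.7.9") < vparse("1.8.0")
    # caveat: the character-wise int() conversion only handles
    # single-digit version components.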
@ -20,6 +20,7 @@ import re
import sys
import json


class FunctionNode(object):
    funcs = {}

@ -39,7 +40,7 @@ class FunctionNode(object):

    def define(self, attrs):
        self.defined = True
        self.defs.append((attrs['filename'], attrs['line']))
        self.defs.append((attrs["filename"], attrs["line"]))
        return self

    def add_call(self, called, attrs):
@ -63,11 +64,12 @@ class FunctionNode(object):
            return cls.funcs[name]
        return FunctionNode(name)


class CallEdge(object):
    def __init__(self, i, o, attrs):
        self.i = i
        self.o = o
        self.is_external = attrs['is_external']
        self.is_external = attrs["is_external"]
        self.attrs = attrs

        i.out.append(self)
@ -76,11 +78,13 @@ class CallEdge(object):
    def __repr__(self):
        return '<"%s()" -> "%s()">' % (self.i.name, self.o.name)


def nameclean(n):
    if '.' in n:
        return n.split('.', 1)[0]
    if "." in n:
        return n.split(".", 1)[0]
    return n
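nameclean() exists because compiler-generated clones show up in the call data with dotted suffixes; cutting at the first dot merges them back onto the base symbol. Illustrative (the suffix shown is a typical GCC clone name, not taken from this commit):

    nameclean("bgp_process.constprop.0")  # -> "bgp_process"
    nameclean("work_queue_add")           # -> "work_queue_add" (unchanged)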

def calc_rank(queue, direction):
    nextq = queue

@ -98,7 +102,7 @@ def calc_rank(queue, direction):
        queue = nextq
        nextq = []

        #sys.stderr.write('rank %d\n' % currank)
        # sys.stderr.write('rank %d\n' % currank)

        cont = False

@ -123,6 +127,7 @@ def calc_rank(queue, direction):

    return nextq


class Graph(dict):
    class Subgraph(set):
        def __init__(self):
@ -166,6 +171,7 @@ class Graph(dict):

        def calls(self):
            return self._calls

        def calld(self):
            return self._calld

@ -245,7 +251,7 @@ class Graph(dict):
                else:
                    evalset.add(evnode)

            #if len(candidates) > 1:
            # if len(candidates) > 1:
            #    for candidate in candidates:
            #        if candidate != node:
            #            #node.merge(candidate)
@ -266,7 +272,7 @@ class Graph(dict):
        self._linear_nodes = []

        while len(nodes):
            sys.stderr.write('%d\n' % len(nodes))
            sys.stderr.write("%d\n" % len(nodes))
            node = nodes.pop(0)

            down[node] = set()
@ -304,106 +310,90 @@ class Graph(dict):
        return self._subgraphs, self._linear_nodes


with open(sys.argv[1], 'r') as fd:
with open(sys.argv[1], "r") as fd:
    data = json.load(fd)

extra_info = {
    # zebra - LSP WQ
    ('lsp_processq_add', 'work_queue_add'): [
        'lsp_process',
        'lsp_processq_del',
        'lsp_processq_complete',
    ("lsp_processq_add", "work_queue_add"): [
        "lsp_process",
        "lsp_processq_del",
        "lsp_processq_complete",
    ],
    # zebra - main WQ
    ('mq_add_handler', 'work_queue_add'): [
        'meta_queue_process',
    ],
    ('meta_queue_process', 'work_queue_add'): [
        'meta_queue_process',
    ],
    ("mq_add_handler", "work_queue_add"): ["meta_queue_process",],
    ("meta_queue_process", "work_queue_add"): ["meta_queue_process",],
    # bgpd - label pool WQ
    ('bgp_lp_get', 'work_queue_add'): [
        'lp_cbq_docallback',
    ],
    ('bgp_lp_event_chunk', 'work_queue_add'): [
        'lp_cbq_docallback',
    ],
    ('bgp_lp_event_zebra_up', 'work_queue_add'): [
        'lp_cbq_docallback',
    ],
    ("bgp_lp_get", "work_queue_add"): ["lp_cbq_docallback",],
    ("bgp_lp_event_chunk", "work_queue_add"): ["lp_cbq_docallback",],
    ("bgp_lp_event_zebra_up", "work_queue_add"): ["lp_cbq_docallback",],
    # bgpd - main WQ
    ('bgp_process', 'work_queue_add'): [
        'bgp_process_wq',
        'bgp_processq_del',
    ],
    ('bgp_add_eoiu_mark', 'work_queue_add'): [
        'bgp_process_wq',
        'bgp_processq_del',
    ],
    ("bgp_process", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",],
    ("bgp_add_eoiu_mark", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",],
    # clear node WQ
    ('bgp_clear_route_table', 'work_queue_add'): [
        'bgp_clear_route_node',
        'bgp_clear_node_queue_del',
        'bgp_clear_node_complete',
    ("bgp_clear_route_table", "work_queue_add"): [
        "bgp_clear_route_node",
        "bgp_clear_node_queue_del",
        "bgp_clear_node_complete",
    ],
    # rfapi WQs
    ('rfapi_close', 'work_queue_add'): [
        'rfapi_deferred_close_workfunc',
    ],
    ('rfapiRibUpdatePendingNode', 'work_queue_add'): [
        'rfapiRibDoQueuedCallback',
        'rfapiRibQueueItemDelete',
    ("rfapi_close", "work_queue_add"): ["rfapi_deferred_close_workfunc",],
    ("rfapiRibUpdatePendingNode", "work_queue_add"): [
        "rfapiRibDoQueuedCallback",
        "rfapiRibQueueItemDelete",
    ],
}
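extra_info supplies, by hand, the edges a static scan cannot see: when one of these callers hands work to work_queue_add(), the functions listed are what the queued item eventually runs, and the loop below turns each pair into ordinary call edges. A sketch of the lookup (illustrative):

    # mirrors the extra_info[func, tgt] lookup in the loop below
    for dst in extra_info["bgp_process", "work_queue_add"]:
        print(dst)  # -> bgp_process_wq, bgp_processq_del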

for func, fdata in data['functions'].items():
for func, fdata in data["functions"].items():
    func = nameclean(func)
    fnode = FunctionNode.get(func).define(fdata)

    for call in fdata['calls']:
        if call.get('type') in [None, 'unnamed', 'thread_sched']:
            if call.get('target') is None:
    for call in fdata["calls"]:
        if call.get("type") in [None, "unnamed", "thread_sched"]:
            if call.get("target") is None:
                continue
            tgt = nameclean(call['target'])
            tgt = nameclean(call["target"])
            fnode.add_call(FunctionNode.get(tgt), call)
            for fptr in call.get('funcptrs', []):
            for fptr in call.get("funcptrs", []):
                fnode.add_call(FunctionNode.get(nameclean(fptr)), call)
            if tgt == 'work_queue_add':
            if tgt == "work_queue_add":
                if (func, tgt) not in extra_info:
                    sys.stderr.write('%s:%d:%s(): work_queue_add() not handled\n' % (
                        call['filename'], call['line'], func))
                    sys.stderr.write(
                        "%s:%d:%s(): work_queue_add() not handled\n"
                        % (call["filename"], call["line"], func)
                    )
                else:
                    attrs = dict(call)
                    attrs.update({'is_external': False, 'type': 'workqueue'})
                    attrs.update({"is_external": False, "type": "workqueue"})
                    for dst in extra_info[func, tgt]:
                        fnode.add_call(FunctionNode.get(dst), call)
        elif call['type'] == 'install_element':
            vty_node = FunctionNode.get('VTY_NODE_%d' % call['vty_node'])
            vty_node.add_call(FunctionNode.get(nameclean(call['target'])), call)
        elif call['type'] == 'hook':
        elif call["type"] == "install_element":
            vty_node = FunctionNode.get("VTY_NODE_%d" % call["vty_node"])
            vty_node.add_call(FunctionNode.get(nameclean(call["target"])), call)
        elif call["type"] == "hook":
            # TODO: edges for hooks from data['hooks']
            pass

n = FunctionNode.funcs

# fix some very low end functions cycling back very far to the top
if 'peer_free' in n:
    n['peer_free'].unlink(n['bgp_timer_set'])
    n['peer_free'].unlink(n['bgp_addpath_set_peer_type'])
if 'bgp_path_info_extra_free' in n:
    n['bgp_path_info_extra_free'].rank = 0
if "peer_free" in n:
    n["peer_free"].unlink(n["bgp_timer_set"])
    n["peer_free"].unlink(n["bgp_addpath_set_peer_type"])
if "bgp_path_info_extra_free" in n:
    n["bgp_path_info_extra_free"].rank = 0

if 'zlog_ref' in n:
    n['zlog_ref'].rank = 0
if 'mt_checkalloc' in n:
    n['mt_checkalloc'].rank = 0
if "zlog_ref" in n:
    n["zlog_ref"].rank = 0
if "mt_checkalloc" in n:
    n["mt_checkalloc"].rank = 0

queue = list(FunctionNode.funcs.values())
queue = calc_rank(queue, 1)
queue = calc_rank(queue, -1)

sys.stderr.write('%d functions in cyclic set\n' % len(queue))
sys.stderr.write("%d functions in cyclic set\n" % len(queue))

graph = Graph(queue)
graph.automerge()
@ -411,10 +401,12 @@ graph.automerge()
gv_nodes = []
gv_edges = []

sys.stderr.write('%d groups after automerge\n' % len(graph._groups))
sys.stderr.write("%d groups after automerge\n" % len(graph._groups))


def is_vnc(n):
    return n.startswith('rfapi') or n.startswith('vnc') or ('_vnc_' in n)
    return n.startswith("rfapi") or n.startswith("vnc") or ("_vnc_" in n)


_vncstyle = ',fillcolor="#ffffcc",style=filled'
cyclic_set_names = set([fn.name for fn in graph.values()])
@ -422,55 +414,76 @@ cyclic_set_names = set([fn.name for fn in graph.values()])
for i, group in enumerate(graph._groups):
    if len(group) > 1:
        group.num = i
        gv_nodes.append('\tsubgraph cluster_%d {' % i)
        gv_nodes.append('\t\tcolor=blue;')
        gv_nodes.append("\tsubgraph cluster_%d {" % i)
        gv_nodes.append("\t\tcolor=blue;")
        for gn in group:
            has_cycle_callers = set(gn.calld()) - group
            has_ext_callers = set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
            has_ext_callers = (
                set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
            )

            style = ''
            etext = ''
            style = ""
            etext = ""
            if is_vnc(gn.name):
                style += _vncstyle
            if has_cycle_callers:
                style += ',color=blue,penwidth=3'
                style += ",color=blue,penwidth=3"
            if has_ext_callers:
                style += ',fillcolor="#ffeebb",style=filled'
                etext += '<br/><font point-size="10">(%d other callers)</font>' % (len(has_ext_callers))
                etext += '<br/><font point-size="10">(%d other callers)</font>' % (
                    len(has_ext_callers)
                )

            gv_nodes.append('\t\t"%s" [shape=box,label=<%s%s>%s];' % (gn.name, '<br/>'.join([fn.name for fn in gn._fns]), etext, style))
        gv_nodes.append('\t}')
            gv_nodes.append(
                '\t\t"%s" [shape=box,label=<%s%s>%s];'
                % (gn.name, "<br/>".join([fn.name for fn in gn._fns]), etext, style)
            )
        gv_nodes.append("\t}")
    else:
        for gn in group:
            has_ext_callers = set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
            has_ext_callers = (
                set([edge.i.name for edge in gn._fn.inb]) - cyclic_set_names
            )

            style = ''
            etext = ''
            style = ""
            etext = ""
            if is_vnc(gn.name):
                style += _vncstyle
            if has_ext_callers:
                style += ',fillcolor="#ffeebb",style=filled'
                etext += '<br/><font point-size="10">(%d other callers)</font>' % (len(has_ext_callers))
            gv_nodes.append('\t"%s" [shape=box,label=<%s%s>%s];' % (gn.name, '<br/>'.join([fn.name for fn in gn._fns]), etext, style))
                etext += '<br/><font point-size="10">(%d other callers)</font>' % (
                    len(has_ext_callers)
                )
            gv_nodes.append(
                '\t"%s" [shape=box,label=<%s%s>%s];'
                % (gn.name, "<br/>".join([fn.name for fn in gn._fns]), etext, style)
            )

edges = set()
for gn in graph.values():
    for calls in gn.calls():
        if gn._group == calls._group:
            gv_edges.append('\t"%s" -> "%s" [color="#55aa55",style=dashed];' % (gn.name, calls.name))
            gv_edges.append(
                '\t"%s" -> "%s" [color="#55aa55",style=dashed];' % (gn.name, calls.name)
            )
        else:

            def xname(nn):
                if len(nn._group) > 1:
                    return 'cluster_%d' % nn._group.num
                    return "cluster_%d" % nn._group.num
                else:
                    return nn.name

            tup = xname(gn), calls.name
            if tup[0] != tup[1] and tup not in edges:
                gv_edges.append('\t"%s" -> "%s" [weight=0.0,w=0.0,color=blue];' % tup)
                edges.add(tup)

with open(sys.argv[2], 'w') as fd:
    fd.write('''digraph {
with open(sys.argv[2], "w") as fd:
    fd.write(
        """digraph {
node [fontsize=13,fontname="Fira Sans"];
%s
}''' % '\n'.join(gv_nodes + [''] + gv_edges))
}"""
        % "\n".join(gv_nodes + [""] + gv_edges)
    )
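The file written above is plain Graphviz DOT, renderable with dot(1). A sample of its shape (node and edge names hypothetical, not output of a real run):

    digraph {
    node [fontsize=13,fontname="Fira Sans"];
    	"bgp_process" [shape=box,label=<bgp_process>];
    	"bgp_process" -> "bgp_process_wq" [weight=0.0,w=0.0,color=blue];
    }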
293 python/clidef.py
@ -26,39 +26,49 @@ from io import StringIO
# the various handlers generate output C code for a particular type of
# CLI token, choosing the most useful output C type.


class RenderHandler(object):
    def __init__(self, token):
        pass

    def combine(self, other):
        if type(self) == type(other):
            return other
        return StringHandler(None)

    deref = ''
    deref = ""
    drop_str = False
    canfail = True
    canassert = False


class StringHandler(RenderHandler):
    argtype = 'const char *'
    decl = Template('const char *$varname = NULL;')
    code = Template('$varname = (argv[_i]->type == WORD_TKN) ? argv[_i]->text : argv[_i]->arg;')
    argtype = "const char *"
    decl = Template("const char *$varname = NULL;")
    code = Template(
        "$varname = (argv[_i]->type == WORD_TKN) ? argv[_i]->text : argv[_i]->arg;"
    )
    drop_str = True
    canfail = False
    canassert = True


class LongHandler(RenderHandler):
    argtype = 'long'
    decl = Template('long $varname = 0;')
    code = Template('''\
    argtype = "long"
    decl = Template("long $varname = 0;")
    code = Template(
        """\
char *_end;
$varname = strtol(argv[_i]->arg, &_end, 10);
_fail = (_end == argv[_i]->arg) || (*_end != '\\0');''')
_fail = (_end == argv[_i]->arg) || (*_end != '\\0');"""
    )


# A.B.C.D/M (prefix_ipv4) and
# X:X::X:X/M (prefix_ipv6) are "compatible" and can merge into a
# struct prefix:


class PrefixBase(RenderHandler):
    def combine(self, other):
        if type(self) == type(other):
@ -66,23 +76,33 @@ class PrefixBase(RenderHandler):
        if isinstance(other, PrefixBase):
            return PrefixGenHandler(None)
        return StringHandler(None)
    deref = '&'

    deref = "&"


class Prefix4Handler(PrefixBase):
    argtype = 'const struct prefix_ipv4 *'
    decl = Template('struct prefix_ipv4 $varname = { };')
    code = Template('_fail = !str2prefix_ipv4(argv[_i]->arg, &$varname);')
    argtype = "const struct prefix_ipv4 *"
    decl = Template("struct prefix_ipv4 $varname = { };")
    code = Template("_fail = !str2prefix_ipv4(argv[_i]->arg, &$varname);")


class Prefix6Handler(PrefixBase):
    argtype = 'const struct prefix_ipv6 *'
    decl = Template('struct prefix_ipv6 $varname = { };')
    code = Template('_fail = !str2prefix_ipv6(argv[_i]->arg, &$varname);')
    argtype = "const struct prefix_ipv6 *"
    decl = Template("struct prefix_ipv6 $varname = { };")
    code = Template("_fail = !str2prefix_ipv6(argv[_i]->arg, &$varname);")


class PrefixEthHandler(PrefixBase):
    argtype = 'struct prefix_eth *'
    decl = Template('struct prefix_eth $varname = { };')
    code = Template('_fail = !str2prefix_eth(argv[_i]->arg, &$varname);')
    argtype = "struct prefix_eth *"
    decl = Template("struct prefix_eth $varname = { };")
    code = Template("_fail = !str2prefix_eth(argv[_i]->arg, &$varname);")


class PrefixGenHandler(PrefixBase):
    argtype = 'const struct prefix *'
    decl = Template('struct prefix $varname = { };')
    code = Template('_fail = !str2prefix(argv[_i]->arg, &$varname);')
    argtype = "const struct prefix *"
    decl = Template("struct prefix $varname = { };")
    code = Template("_fail = !str2prefix(argv[_i]->arg, &$varname);")


# same for IP addresses. result is union sockunion.
class IPBase(RenderHandler):
@ -92,18 +112,27 @@ class IPBase(RenderHandler):
        if type(other) in [IP4Handler, IP6Handler, IPGenHandler]:
            return IPGenHandler(None)
        return StringHandler(None)


class IP4Handler(IPBase):
    argtype = 'struct in_addr'
    decl = Template('struct in_addr $varname = { INADDR_ANY };')
    code = Template('_fail = !inet_aton(argv[_i]->arg, &$varname);')
    argtype = "struct in_addr"
    decl = Template("struct in_addr $varname = { INADDR_ANY };")
    code = Template("_fail = !inet_aton(argv[_i]->arg, &$varname);")


class IP6Handler(IPBase):
    argtype = 'struct in6_addr'
    decl = Template('struct in6_addr $varname = {};')
    code = Template('_fail = !inet_pton(AF_INET6, argv[_i]->arg, &$varname);')
    argtype = "struct in6_addr"
    decl = Template("struct in6_addr $varname = {};")
    code = Template("_fail = !inet_pton(AF_INET6, argv[_i]->arg, &$varname);")


class IPGenHandler(IPBase):
    argtype = 'const union sockunion *'
    decl = Template('''union sockunion s__$varname = { .sa.sa_family = AF_UNSPEC }, *$varname = NULL;''')
    code = Template('''\
    argtype = "const union sockunion *"
    decl = Template(
        """union sockunion s__$varname = { .sa.sa_family = AF_UNSPEC }, *$varname = NULL;"""
    )
    code = Template(
        """\
if (argv[_i]->text[0] == 'X') {
	s__$varname.sa.sa_family = AF_INET6;
	_fail = !inet_pton(AF_INET6, argv[_i]->arg, &s__$varname.sin6.sin6_addr);
@ -112,26 +141,30 @@ if (argv[_i]->text[0] == 'X') {
	s__$varname.sa.sa_family = AF_INET;
	_fail = !inet_aton(argv[_i]->arg, &s__$varname.sin.sin_addr);
	$varname = &s__$varname;
}''')
}"""
    )
    canassert = True


def mix_handlers(handlers):
    def combine(a, b):
        if a is None:
            return b
        return a.combine(b)

    return reduce(combine, handlers, None)


handlers = {
    'WORD_TKN': StringHandler,
    'VARIABLE_TKN': StringHandler,
    'RANGE_TKN': LongHandler,
    'IPV4_TKN': IP4Handler,
    'IPV4_PREFIX_TKN': Prefix4Handler,
    'IPV6_TKN': IP6Handler,
    'IPV6_PREFIX_TKN': Prefix6Handler,
    'MAC_TKN': PrefixEthHandler,
    'MAC_PREFIX_TKN': PrefixEthHandler,
    "WORD_TKN": StringHandler,
    "VARIABLE_TKN": StringHandler,
    "RANGE_TKN": LongHandler,
    "IPV4_TKN": IP4Handler,
    "IPV4_PREFIX_TKN": Prefix4Handler,
    "IPV6_TKN": IP6Handler,
    "IPV6_PREFIX_TKN": Prefix6Handler,
    "MAC_TKN": PrefixEthHandler,
    "MAC_PREFIX_TKN": PrefixEthHandler,
}
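mix_handlers() reduces all handlers seen for one CLI variable name to a single compatible C type, via the combine() methods above. A quick sketch (illustrative):

    # IPv4 + IPv6 tokens under one varname collapse to the sockunion handler:
    h = mix_handlers([IP4Handler(None), IP6Handler(None)])
    assert isinstance(h, IPGenHandler)
    # unrelated types fall back to the plain string handler:
    h = mix_handlers([LongHandler(None), StringHandler(None)])
    assert isinstance(h, StringHandler)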

# core template invoked for each occurence of DEFPY.
@ -139,7 +172,8 @@ handlers = {
# the "#if $..." bits are there to keep this template unified into one
# common form, without requiring a more advanced template engine (e.g.
# jinja2)
templ = Template('''/* $fnname => "$cmddef" */
templ = Template(
    """/* $fnname => "$cmddef" */
DEFUN_CMD_FUNC_DECL($fnname)
#define funcdecl_$fnname static int ${fnname}_magic(\\
	const struct cmd_element *self __attribute__ ((unused)),\\
@ -178,18 +212,22 @@ $argassert
	return ${fnname}_magic(self, vty, argc, argv$arglist);
}

''')
"""
)

# invoked for each named parameter
argblock = Template('''
argblock = Template(
    """
		if (!strcmp(argv[_i]->varname, \"$varname\")) {$strblock
			$code
		}''')
		}"""
)

def get_always_args(token, always_args, args = [], stack = []):

def get_always_args(token, always_args, args=[], stack=[]):
    if token in stack:
        return
    if token.type == 'END_TKN':
    if token.type == "END_TKN":
        for arg in list(always_args):
            if arg not in args:
                always_args.remove(arg)
@ -201,38 +239,45 @@ def get_always_args(token, always_args, args = [], stack = []):
    for nexttkn in token.next():
        get_always_args(nexttkn, always_args, args, stack)


class Macros(dict):
    def load(self, filename):
        filedata = clippy.parse(filename)
        for entry in filedata['data']:
            if entry['type'] != 'PREPROC':
        for entry in filedata["data"]:
            if entry["type"] != "PREPROC":
                continue
            ppdir = entry['line'].lstrip().split(None, 1)
            if ppdir[0] != 'define' or len(ppdir) != 2:
            ppdir = entry["line"].lstrip().split(None, 1)
            if ppdir[0] != "define" or len(ppdir) != 2:
                continue
            ppdef = ppdir[1].split(None, 1)
            name = ppdef[0]
            if '(' in name:
            if "(" in name:
                continue
            val = ppdef[1] if len(ppdef) == 2 else ''
            val = ppdef[1] if len(ppdef) == 2 else ""

            val = val.strip(' \t\n\\')
            val = val.strip(" \t\n\\")
            if name in self:
                sys.stderr.write('warning: macro %s redefined!\n' % (name))
                sys.stderr.write("warning: macro %s redefined!\n" % (name))
            self[name] = val


def process_file(fn, ofd, dumpfd, all_defun, macros):
    errors = 0
    filedata = clippy.parse(fn)

    for entry in filedata['data']:
        if entry['type'].startswith('DEFPY') or (all_defun and entry['type'].startswith('DEFUN')):
            if len(entry['args'][0]) != 1:
                sys.stderr.write('%s:%d: DEFPY function name not parseable (%r)\n' % (fn, entry['lineno'], entry['args'][0]))
    for entry in filedata["data"]:
        if entry["type"].startswith("DEFPY") or (
            all_defun and entry["type"].startswith("DEFUN")
        ):
            if len(entry["args"][0]) != 1:
                sys.stderr.write(
                    "%s:%d: DEFPY function name not parseable (%r)\n"
                    % (fn, entry["lineno"], entry["args"][0])
                )
                errors += 1
                continue

            cmddef = entry['args'][2]
            cmddef = entry["args"][2]
            cmddefx = []
            for i in cmddef:
                while i in macros:
@ -241,13 +286,16 @@ def process_file(fn, ofd, dumpfd, all_defun, macros):
                    cmddefx.append(i[1:-1])
                    continue

                sys.stderr.write('%s:%d: DEFPY command string not parseable (%r)\n' % (fn, entry['lineno'], cmddef))
                sys.stderr.write(
                    "%s:%d: DEFPY command string not parseable (%r)\n"
                    % (fn, entry["lineno"], cmddef)
                )
                errors += 1
                cmddefx = None
                break
            if cmddefx is None:
                continue
            cmddef = ''.join([i for i in cmddefx])
            cmddef = "".join([i for i in cmddefx])

            graph = clippy.Graph(cmddef)
            args = OrderedDict()
@ -263,12 +311,12 @@ def process_file(fn, ofd, dumpfd, all_defun, macros):

            get_always_args(graph.first(), always_args)

            #print('-' * 76)
            #pprint(entry)
            #clippy.dump(graph)
            #pprint(args)
            # print('-' * 76)
            # pprint(entry)
            # clippy.dump(graph)
            # pprint(args)

            params = { 'cmddef': cmddef, 'fnname': entry['args'][0][0] }
            params = {"cmddef": cmddef, "fnname": entry["args"][0][0]}
            argdefs = []
            argdecls = []
            arglist = []
@ -277,63 +325,96 @@ def process_file(fn, ofd, dumpfd, all_defun, macros):
            doc = []
            canfail = 0

            def do_add(handler, basename, varname, attr = ''):
                argdefs.append(',\\\n\t%s %s%s' % (handler.argtype, varname, attr))
                argdecls.append('\t%s\n' % (handler.decl.substitute({'varname': varname}).replace('\n', '\n\t')))
                arglist.append(', %s%s' % (handler.deref, varname))
            def do_add(handler, basename, varname, attr=""):
                argdefs.append(",\\\n\t%s %s%s" % (handler.argtype, varname, attr))
                argdecls.append(
                    "\t%s\n"
                    % (
                        handler.decl.substitute({"varname": varname}).replace(
                            "\n", "\n\t"
                        )
                    )
                )
                arglist.append(", %s%s" % (handler.deref, varname))
                if basename in always_args and handler.canassert:
                    argassert.append('''\tif (!%s) {
                    argassert.append(
                        """\tif (!%s) {
\t\tvty_out(vty, "Internal CLI error [%%s]\\n", "%s");
\t\treturn CMD_WARNING;
\t}\n''' % (varname, varname))
                if attr == '':
\t}\n"""
                        % (varname, varname)
                    )
                if attr == "":
                    at = handler.argtype
                    if not at.startswith('const '):
                        at = '. . . ' + at
                    doc.append('\t%-26s %s %s' % (at, 'alw' if basename in always_args else 'opt', varname))
                    if not at.startswith("const "):
                        at = ". . . " + at
                    doc.append(
                        "\t%-26s %s %s"
                        % (at, "alw" if basename in always_args else "opt", varname)
                    )

            for varname in args.keys():
                handler = mix_handlers(args[varname])
                #print(varname, handler)
                if handler is None: continue
                # print(varname, handler)
                if handler is None:
                    continue
                do_add(handler, varname, varname)
                code = handler.code.substitute({'varname': varname}).replace('\n', '\n\t\t\t')
                code = handler.code.substitute({"varname": varname}).replace(
                    "\n", "\n\t\t\t"
                )
                if handler.canfail:
                    canfail = 1
                strblock = ''
                strblock = ""
                if not handler.drop_str:
                    do_add(StringHandler(None), varname, '%s_str' % (varname), ' __attribute__ ((unused))')
                    strblock = '\n\t\t\t%s_str = argv[_i]->arg;' % (varname)
                argblocks.append(argblock.substitute({'varname': varname, 'strblock': strblock, 'code': code}))
                    do_add(
                        StringHandler(None),
                        varname,
                        "%s_str" % (varname),
                        " __attribute__ ((unused))",
                    )
                    strblock = "\n\t\t\t%s_str = argv[_i]->arg;" % (varname)
                argblocks.append(
                    argblock.substitute(
                        {"varname": varname, "strblock": strblock, "code": code}
                    )
                )

            if dumpfd is not None:
                if len(arglist) > 0:
                    dumpfd.write('"%s":\n%s\n\n' % (cmddef, '\n'.join(doc)))
                    dumpfd.write('"%s":\n%s\n\n' % (cmddef, "\n".join(doc)))
                else:
                    dumpfd.write('"%s":\n\t---- no magic arguments ----\n\n' % (cmddef))

            params['argdefs'] = ''.join(argdefs)
            params['argdecls'] = ''.join(argdecls)
            params['arglist'] = ''.join(arglist)
            params['argblocks'] = ''.join(argblocks)
            params['canfail'] = canfail
            params['nonempty'] = len(argblocks)
            params['argassert'] = ''.join(argassert)
            params["argdefs"] = "".join(argdefs)
            params["argdecls"] = "".join(argdecls)
            params["arglist"] = "".join(arglist)
            params["argblocks"] = "".join(argblocks)
            params["canfail"] = canfail
            params["nonempty"] = len(argblocks)
            params["argassert"] = "".join(argassert)
            ofd.write(templ.substitute(params))

    return errors

if __name__ == '__main__':

if __name__ == "__main__":
    import argparse

    argp = argparse.ArgumentParser(description = 'FRR CLI preprocessor in Python')
    argp.add_argument('--all-defun', action = 'store_const', const = True,
            help = 'process DEFUN() statements in addition to DEFPY()')
    argp.add_argument('--show', action = 'store_const', const = True,
            help = 'print out list of arguments and types for each definition')
    argp.add_argument('-o', type = str, metavar = 'OUTFILE',
            help = 'output C file name')
    argp.add_argument('cfile', type = str)
    argp = argparse.ArgumentParser(description="FRR CLI preprocessor in Python")
    argp.add_argument(
        "--all-defun",
        action="store_const",
        const=True,
        help="process DEFUN() statements in addition to DEFPY()",
    )
    argp.add_argument(
        "--show",
        action="store_const",
        const=True,
        help="print out list of arguments and types for each definition",
    )
    argp.add_argument("-o", type=str, metavar="OUTFILE", help="output C file name")
    argp.add_argument("cfile", type=str)
    args = argp.parse_args()

    dumpfd = None
@ -349,15 +430,17 @@ if __name__ == '__main__':
    basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    macros = Macros()
    macros.load('lib/route_types.h')
    macros.load(os.path.join(basepath, 'lib/command.h'))
    macros.load(os.path.join(basepath, 'bgpd/bgp_vty.h'))
    macros.load("lib/route_types.h")
    macros.load(os.path.join(basepath, "lib/command.h"))
    macros.load(os.path.join(basepath, "bgpd/bgp_vty.h"))
    # sigh :(
    macros['PROTO_REDIST_STR'] = 'FRR_REDIST_STR_ISISD'
    macros["PROTO_REDIST_STR"] = "FRR_REDIST_STR_ISISD"

    errors = process_file(args.cfile, ofd, dumpfd, args.all_defun, macros)
    if errors != 0:
        sys.exit(1)

    if args.o is not None:
        clippy.wrdiff(args.o, ofd, [args.cfile, os.path.realpath(__file__), sys.executable])
        clippy.wrdiff(
            args.o, ofd, [args.cfile, os.path.realpath(__file__), sys.executable]
        )
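For orientation: this file imports the clippy C extension, so it does not run under a stock interpreter; the FRR build invokes it to turn each DEFPY-bearing source into a *_clippy.c include. A hypothetical direct use of the pieces above (input path illustrative, not from this commit):

    macros = Macros()
    macros.load("lib/command.h")
    errors = process_file("lib/filter.c", sys.stdout, None, False, macros)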
@ -20,11 +20,12 @@ import os, stat
import _clippy
from _clippy import parse, Graph, GraphNode


def graph_iterate(graph):
    '''iterator yielding all nodes of a graph
    """iterator yielding all nodes of a graph

    nodes arrive in input/definition order, graph circles are avoided.
    '''
    """

    queue = [(graph.first(), frozenset(), 0)]
    while len(queue) > 0:
@ -42,21 +43,25 @@ def graph_iterate(graph):
            if n not in stop and n is not node:
                queue.insert(0, (n, stop, depth + 1))


def dump(graph):
    '''print out clippy.Graph'''
    """print out clippy.Graph"""

    for i, depth in graph_iterate(graph):
        print('\t%s%s %r' % (' ' * (depth * 2), i.type, i.text))
        print("\t%s%s %r" % (" " * (depth * 2), i.type, i.text))

def wrdiff(filename, buf, reffiles = []):
    '''write buffer to file if contents changed'''

    expl = ''
    if hasattr(buf, 'getvalue'):
def wrdiff(filename, buf, reffiles=[]):
    """write buffer to file if contents changed"""

    expl = ""
    if hasattr(buf, "getvalue"):
        buf = buf.getvalue()
    old = None
    try: old = open(filename, 'r').read()
    except: pass
    try:
        old = open(filename, "r").read()
    except:
        pass
    if old == buf:
        for reffile in reffiles:
            # ensure output timestamp is newer than inputs, for make
@ -67,7 +72,7 @@ def wrdiff(filename, buf, reffiles = []):
        # sys.stderr.write('%s unchanged, not written\n' % (filename))
        return

    newname = '%s.new-%d' % (filename, os.getpid())
    with open(newname, 'w') as out:
    newname = "%s.new-%d" % (filename, os.getpid())
    with open(newname, "w") as out:
        out.write(buf)
    os.rename(newname, filename)
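wrdiff() above is the usual write-if-changed helper for generated sources: identical content means no rewrite (only a timestamp bump so make stays happy), otherwise the new content lands in a temp file that is renamed into place, so readers never see a half-written file. Usage sketch (names illustrative):

    import io
    buf = io.StringIO()
    buf.write("/* generated */\n")
    wrdiff("out_clippy.c", buf, reffiles=["in.c"])  # no-op write if unchanged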
@ -9,21 +9,21 @@ include_re = re.compile('^#\s*include\s+["<]([^ ">]+)[">]', re.M)

errors = 0

files = subprocess.check_output(['git', 'ls-files']).decode('ASCII')
files = subprocess.check_output(["git", "ls-files"]).decode("ASCII")
for fn in files.splitlines():
    if not fn.endswith('.c'):
    if not fn.endswith(".c"):
        continue
    if fn.startswith('tools/'):
    if fn.startswith("tools/"):
        continue
    with open(fn, 'r') as fd:
    with open(fn, "r") as fd:
        data = fd.read()
        m = include_re.search(data)
        if m is None:
            #sys.stderr.write('no #include in %s?\n' % (fn))
            # sys.stderr.write('no #include in %s?\n' % (fn))
            continue
        if m.group(1) in ['config.h', 'zebra.h', 'lib/zebra.h']:
        if m.group(1) in ["config.h", "zebra.h", "lib/zebra.h"]:
            continue
        sys.stderr.write('%s: %s\n' % (fn, m.group(0)))
        sys.stderr.write("%s: %s\n" % (fn, m.group(0)))
        errors += 1

if errors:
@ -13,69 +13,91 @@ import argparse
from string import Template
from makevars import MakeReVars

argp = argparse.ArgumentParser(description = 'FRR Makefile extensions')
argp.add_argument('--dev-build', action = 'store_const', const = True,
        help = 'run additional developer checks')
argp = argparse.ArgumentParser(description="FRR Makefile extensions")
argp.add_argument(
    "--dev-build",
    action="store_const",
    const=True,
    help="run additional developer checks",
)
args = argp.parse_args()

with open('Makefile', 'r') as fd:
with open("Makefile", "r") as fd:
    before = fd.read()

mv = MakeReVars(before)

clippy_scan = mv['clippy_scan'].strip().split()
clippy_scan = mv["clippy_scan"].strip().split()
for clippy_file in clippy_scan:
    assert clippy_file.endswith('.c')
    assert clippy_file.endswith(".c")

# check for files using clippy but not listed in clippy_scan
if args.dev_build:
    basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if os.path.exists(os.path.join(basepath, '.git')):
        clippy_ref = subprocess.check_output([
            'git', '-C', basepath, 'grep', '-l', '-P', '^#\s*include.*_clippy.c', '--', '**.c']).decode('US-ASCII')
    if os.path.exists(os.path.join(basepath, ".git")):
        clippy_ref = subprocess.check_output(
            [
                "git",
                "-C",
                basepath,
                "grep",
                "-l",
                "-P",
                "^#\s*include.*_clippy.c",
                "--",
                "**.c",
            ]
        ).decode("US-ASCII")

        clippy_ref = set(clippy_ref.splitlines())
        missing = clippy_ref - set(clippy_scan)

        if len(missing) > 0:
            sys.stderr.write('error: files seem to be using clippy, but not listed in "clippy_scan" in subdir.am:\n\t%s\n' % ('\n\t'.join(sorted(missing))))
            sys.stderr.write(
                'error: files seem to be using clippy, but not listed in "clippy_scan" in subdir.am:\n\t%s\n'
                % ("\n\t".join(sorted(missing)))
            )
            sys.exit(1)

clippydep = Template('''
clippydep = Template(
    """
${clippybase}.$$(OBJEXT): ${clippybase}_clippy.c
${clippybase}.lo: ${clippybase}_clippy.c
${clippybase}_clippy.c: $$(CLIPPY_DEPS)''')
${clippybase}_clippy.c: $$(CLIPPY_DEPS)"""
)

clippyauxdep = Template('''# clippy{
clippyauxdep = Template(
    """# clippy{
# auxiliary clippy target
${target}: ${clippybase}_clippy.c
# }clippy''')
# }clippy"""
)

lines = before.splitlines()
autoderp = '#AUTODERP# '
autoderp = "#AUTODERP# "
out_lines = []
bcdeps = []
make_rule_re = re.compile('^([^:\s]+):\s*([^:\s]+)\s*($|\n)')
make_rule_re = re.compile("^([^:\s]+):\s*([^:\s]+)\s*($|\n)")

while lines:
    line = lines.pop(0)
    if line.startswith(autoderp):
        line = line[len(autoderp):]
        line = line[len(autoderp) :]

    if line == '# clippy{':
    if line == "# clippy{":
        while lines:
            line = lines.pop(0)
            if line == '# }clippy':
            if line == "# }clippy":
                break
        continue

    if line.startswith('#'):
    if line.startswith("#"):
        out_lines.append(line)
        continue

    full_line = line
    full_lines = lines[:]
    while full_line.endswith('\\'):
    while full_line.endswith("\\"):
        full_line = full_line[:-1] + full_lines.pop(0)

    m = make_rule_re.match(full_line)
@ -87,43 +109,51 @@ while lines:

    target, dep = m.group(1), m.group(2)

    if target.endswith('.lo') or target.endswith('.o'):
        if not dep.endswith('.h'):
            bcdeps.append('%s.bc: %s' % (target, target))
            bcdeps.append('\t$(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ %s' % (dep))
    if target.endswith(".lo") or target.endswith(".o"):
        if not dep.endswith(".h"):
            bcdeps.append("%s.bc: %s" % (target, target))
            bcdeps.append("\t$(AM_V_LLVM_BC)$(COMPILE) -emit-llvm -c -o $@ %s" % (dep))
    if m.group(2) in clippy_scan:
        out_lines.append(clippyauxdep.substitute(target=m.group(1), clippybase=m.group(2)[:-2]))
        out_lines.append(
            clippyauxdep.substitute(target=m.group(1), clippybase=m.group(2)[:-2])
        )

    out_lines.append(line)

out_lines.append('# clippy{\n# main clippy targets')
out_lines.append("# clippy{\n# main clippy targets")
for clippy_file in clippy_scan:
    out_lines.append(clippydep.substitute(clippybase = clippy_file[:-2]))
    out_lines.append(clippydep.substitute(clippybase=clippy_file[:-2]))

out_lines.append('')
out_lines.append("")
out_lines.extend(bcdeps)
out_lines.append('')
out_lines.append("")
bc_targets = []
for varname in ['bin_PROGRAMS', 'sbin_PROGRAMS', 'lib_LTLIBRARIES', 'module_LTLIBRARIES', 'noinst_LIBRARIES']:
for varname in [
    "bin_PROGRAMS",
    "sbin_PROGRAMS",
    "lib_LTLIBRARIES",
    "module_LTLIBRARIES",
    "noinst_LIBRARIES",
]:
    bc_targets.extend(mv[varname].strip().split())
for target in bc_targets:
    amtgt = target.replace('/', '_').replace('.', '_').replace('-', '_')
    objs = mv[amtgt + '_OBJECTS'].strip().split()
    objs = [obj + '.bc' for obj in objs]
    deps = mv.get(amtgt + '_DEPENDENCIES', '').strip().split()
    deps = [d + '.bc' for d in deps if d.endswith('.a')]
    amtgt = target.replace("/", "_").replace(".", "_").replace("-", "_")
    objs = mv[amtgt + "_OBJECTS"].strip().split()
    objs = [obj + ".bc" for obj in objs]
    deps = mv.get(amtgt + "_DEPENDENCIES", "").strip().split()
    deps = [d + ".bc" for d in deps if d.endswith(".a")]
    objs.extend(deps)
    out_lines.append('%s.bc: %s' % (target, ' '.join(objs)))
    out_lines.append('\t$(AM_V_LLVM_LD)$(LLVM_LINK) -o $@ $^')
    out_lines.append('')
    out_lines.append("%s.bc: %s" % (target, " ".join(objs)))
    out_lines.append("\t$(AM_V_LLVM_LD)$(LLVM_LINK) -o $@ $^")
    out_lines.append("")

out_lines.append('# }clippy')
out_lines.append('')
out_lines.append("# }clippy")
out_lines.append("")

after = '\n'.join(out_lines)
after = "\n".join(out_lines)
if after == before:
    sys.exit(0)

with open('Makefile.pyout', 'w') as fd:
with open("Makefile.pyout", "w") as fd:
    fd.write(after)
os.rename('Makefile.pyout', 'Makefile')
os.rename("Makefile.pyout", "Makefile")
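The rules this script splices into the Makefile come straight from the clippydep template above; after substitution one block looks like this (base name illustrative):

    # illustrative: what one generated dependency block looks like
    print(clippydep.substitute(clippybase="lib/filter"))
    # lib/filter.$(OBJEXT): lib/filter_clippy.c
    # lib/filter.lo: lib/filter_clippy.c
    # lib/filter_clippy.c: $(CLIPPY_DEPS)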
@ -6,10 +6,12 @@ import os
import subprocess
import re


class MakeVarsBase(object):
    '''
    """
    common code between MakeVars and MakeReVars
    '''
    """

    def __init__(self):
        self._data = dict()

@ -18,31 +20,35 @@ class MakeVarsBase(object):
            self.getvars([k])
        return self._data[k]

    def get(self, k, defval = None):
    def get(self, k, defval=None):
        if k not in self._data:
            self.getvars([k])
        return self._data.get(k) or defval


class MakeVars(MakeVarsBase):
    '''
    """
    makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile

    This variant works by invoking make as a subprocess, i.e. Makefile must
    be valid and working. (This is sometimes a problem if depfiles have not
    been generated.)
    '''
    """

    def getvars(self, varlist):
        '''
        """
        get a batch list of variables from make. faster than individual calls.
        '''
        """
        rdfd, wrfd = os.pipe()

        shvars = ['shvar-%s' % s for s in varlist]
        make = subprocess.Popen(['make', '-s', 'VARFD=%d' % wrfd] + shvars, pass_fds = [wrfd])
        shvars = ["shvar-%s" % s for s in varlist]
        make = subprocess.Popen(
            ["make", "-s", "VARFD=%d" % wrfd] + shvars, pass_fds=[wrfd]
        )
        os.close(wrfd)
        data = b''
        data = b""

        rdf = os.fdopen(rdfd, 'rb')
        rdf = os.fdopen(rdfd, "rb")
        while True:
            rdata = rdf.read()
            if len(rdata) == 0:
@ -52,30 +58,34 @@ class MakeVars(MakeVarsBase):
        del rdf
        make.wait()

        data = data.decode('US-ASCII').strip().split('\n')
        data = data.decode("US-ASCII").strip().split("\n")
        for row in data:
            k, v = row.split('=', 1)
            k, v = row.split("=", 1)
            v = v[1:-1]
            self._data[k] = v


class MakeReVars(MakeVarsBase):
    '''
    """
    makevars['FOO_CFLAGS'] gets you "FOO_CFLAGS" from Makefile

    This variant works by regexing through Makefile. This means the Makefile
    does not need to be fully working, but on the other hand it doesn't support
    fancy complicated make expressions.
    '''
    var_re = re.compile(r'^([^=#\n\s]+)[ \t]*=[ \t]*([^#\n]*)(?:#.*)?$', flags=re.MULTILINE)
    repl_re = re.compile(r'\$(?:([A-Za-z])|\(([^\)]+)\))')
    """

    var_re = re.compile(
        r"^([^=#\n\s]+)[ \t]*=[ \t]*([^#\n]*)(?:#.*)?$", flags=re.MULTILINE
    )
    repl_re = re.compile(r"\$(?:([A-Za-z])|\(([^\)]+)\))")

    def __init__(self, maketext):
        super(MakeReVars, self).__init__()
        self._vars = dict(self.var_re.findall(maketext.replace('\\\n', '')))
        self._vars = dict(self.var_re.findall(maketext.replace("\\\n", "")))

    def replacevar(self, match):
        varname = match.group(1) or match.group(2)
        return self._vars.get(varname, '')
        return self._vars.get(varname, "")

    def getvars(self, varlist):
        for varname in varlist:
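A sketch of the regex variant in use (Makefile text illustrative; this assumes the getvars() body cut off by the hunk above expands $(...) references through replacevar(), as repl_re suggests):

    mv = MakeReVars("EXTRA = -g\nCFLAGS = -O2 $(EXTRA)\n")
    print(mv["CFLAGS"])  # -> "-O2 -g"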
@ -1,14 +1,16 @@
import frrtest
import re

re_okfail = re.compile(r'^(?:\x1b\[3[12]m)?(?P<ret>OK|failed)'.encode('utf8'),
                       re.MULTILINE)
re_okfail = re.compile(
    r"^(?:\x1b\[3[12]m)?(?P<ret>OK|failed)".encode("utf8"), re.MULTILINE
)


class TestAspath(frrtest.TestMultiOut):
    program = './test_aspath'
    program = "./test_aspath"

    def _parsertest(self, line):
        if not hasattr(self, 'parserno'):
        if not hasattr(self, "parserno"):
            self.parserno = -1
        self.parserno += 1

@ -17,13 +19,14 @@ class TestAspath(frrtest.TestMultiOut):
        self._okfail("empty prepend %s:" % line, okfail=re_okfail)

    def _attrtest(self, line):
        if not hasattr(self, 'attrno'):
        if not hasattr(self, "attrno"):
            self.attrno = -1
        self.attrno += 1

        self._onesimple("aspath_attr test %d" % self.attrno)
        self._okfail(line, okfail=re_okfail)


TestAspath.parsertest("seq1")
TestAspath.parsertest("seq2")
TestAspath.parsertest("seq3")
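The test files below all follow the same pattern: class methods such as okfail() and onesimple() queue expected output lines at import time, and frrtest.TestMultiOut runs the binary named by `program` and matches each queued line in order. Minimal shape (illustrative, not from this commit):

    class TestFoo(frrtest.TestMultiOut):
        program = "./test_foo"

    TestFoo.okfail("some test title")  # expect "some test title ... OK" (or flag "failed")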
@ -1,7 +1,9 @@
import frrtest


class TestTable(frrtest.TestMultiOut):
    program = './test_bgp_table'
    program = "./test_bgp_table"


for i in range(7):
    TestTable.onesimple('Checks successfull')
    TestTable.onesimple("Checks successfull")
@ -1,7 +1,9 @@
import frrtest


class TestCapability(frrtest.TestMultiOut):
    program = './test_capability'
    program = "./test_capability"


TestCapability.okfail("MP4: MP IP/Uni")
TestCapability.okfail("MPv6: MP IPv6/Uni")
@ -43,5 +45,9 @@ TestCapability.okfail("AS4real2: AS4 capability, in series of capabilities")
TestCapability.okfail("DynCap: Dynamic Capability Message, IP/Multicast")
TestCapability.okfail("DynCapLong: Dynamic Capability Message, IP/Multicast, truncated")
TestCapability.okfail("DynCapPadded: Dynamic Capability Message, IP/Multicast, padded")
TestCapability.okfail("DynCapMPCpadded: Dynamic Capability Message, IP/Multicast, cap data padded")
TestCapability.okfail("DynCapMPCoverflow: Dynamic Capability Message, IP/Multicast, cap data != length")
TestCapability.okfail(
    "DynCapMPCpadded: Dynamic Capability Message, IP/Multicast, cap data padded"
)
TestCapability.okfail(
    "DynCapMPCoverflow: Dynamic Capability Message, IP/Multicast, cap data != length"
)
@ -1,9 +1,11 @@
import frrtest

class TestEcommunity(frrtest.TestMultiOut):
    program = './test_ecommunity'

TestEcommunity.okfail('ipaddr')
TestEcommunity.okfail('ipaddr-so')
TestEcommunity.okfail('asn')
TestEcommunity.okfail('asn4')

class TestEcommunity(frrtest.TestMultiOut):
    program = "./test_ecommunity"


TestEcommunity.okfail("ipaddr")
TestEcommunity.okfail("ipaddr-so")
TestEcommunity.okfail("asn")
TestEcommunity.okfail("asn4")
@ -1,7 +1,9 @@
import frrtest


class TestMpAttr(frrtest.TestMultiOut):
    program = './test_mp_attr'
    program = "./test_mp_attr"


TestMpAttr.okfail("IPv6: IPV6 MP Reach, global nexthop, 1 NLRI")
TestMpAttr.okfail("IPv6-2: IPV6 MP Reach, global nexthop, 2 NLRIs")
@ -16,13 +18,27 @@ TestMpAttr.okfail("IPv4: IPv4 MP Reach, 2 NLRIs + default")
TestMpAttr.okfail("IPv4-nhlen: IPv4 MP Reach, nexthop lenth overflow")
TestMpAttr.okfail("IPv4-nlrilen: IPv4 MP Reach, nlri lenth overflow")
TestMpAttr.okfail("IPv4-VPNv4: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs")
TestMpAttr.okfail("IPv4-VPNv4-bogus-plen: IPv4/MPLS-labeled VPN MP Reach, RD, Nexthop, NLRI / bogus p'len")
TestMpAttr.okfail("IPv4-VPNv4-plen1-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen short")
TestMpAttr.okfail("IPv4-VPNv4-plen1-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen long")
TestMpAttr.okfail("IPv4-VPNv4-plenn-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRIs, last plen long")
TestMpAttr.okfail("IPv4-VPNv4-plenn-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, last plen short")
TestMpAttr.okfail("IPv4-VPNv4-bogus-rd-type: IPv4/VPNv4 MP Reach, RD, NH, 2 NLRI, unknown RD in 1st (log, but parse)")
TestMpAttr.okfail("IPv4-VPNv4-0-nlri: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRI, 3rd 0 bogus")
TestMpAttr.okfail(
    "IPv4-VPNv4-bogus-plen: IPv4/MPLS-labeled VPN MP Reach, RD, Nexthop, NLRI / bogus p'len"
)
TestMpAttr.okfail(
    "IPv4-VPNv4-plen1-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen short"
)
TestMpAttr.okfail(
    "IPv4-VPNv4-plen1-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, 1st plen long"
)
TestMpAttr.okfail(
    "IPv4-VPNv4-plenn-long: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRIs, last plen long"
)
TestMpAttr.okfail(
    "IPv4-VPNv4-plenn-short: IPv4/VPNv4 MP Reach, RD, Nexthop, 2 NLRIs, last plen short"
)
TestMpAttr.okfail(
    "IPv4-VPNv4-bogus-rd-type: IPv4/VPNv4 MP Reach, RD, NH, 2 NLRI, unknown RD in 1st (log, but parse)"
)
TestMpAttr.okfail(
    "IPv4-VPNv4-0-nlri: IPv4/VPNv4 MP Reach, RD, Nexthop, 3 NLRI, 3rd 0 bogus"
)
TestMpAttr.okfail("IPv6-bug: IPv6, global nexthop, 1 default NLRI")
TestMpAttr.okfail("IPv6-unreach: IPV6 MP Unreach, 1 NLRI")
TestMpAttr.okfail("IPv6-unreach2: IPV6 MP Unreach, 2 NLRIs")
@ -1,9 +1,10 @@
import frrtest


class TestMpath(frrtest.TestMultiOut):
    program = './test_mpath'
    program = "./test_mpath"


TestMpath.okfail("bgp maximum-paths config")
TestMpath.okfail("bgp_mp_list")
TestMpath.okfail("bgp_path_info_mpath_update")
@ -1,196 +1,198 @@
import frrtest


class TestFlag(frrtest.TestMultiOut):
    program = './test_peer_attr'
    program = "./test_peer_attr"


# List of tests can be generated by executing:
# $> ./test_peer_attr 2>&1 | sed -n 's/\\/\\\\/g; s/\S\+ \[test\] \(.\+\)/TestFlag.okfail(\x27\1\x27)/pg'
#
TestFlag.okfail('peer\\advertisement-interval')
TestFlag.okfail('peer\\capability dynamic')
TestFlag.okfail('peer\\capability extended-nexthop')
#TestFlag.okfail('peer\\capability extended-nexthop')
TestFlag.okfail('peer\\description')
TestFlag.okfail('peer\\disable-connected-check')
TestFlag.okfail('peer\\dont-capability-negotiate')
TestFlag.okfail('peer\\enforce-first-as')
TestFlag.okfail('peer\\local-as')
TestFlag.okfail('peer\\local-as 1 no-prepend')
TestFlag.okfail('peer\\local-as 1 no-prepend replace-as')
TestFlag.okfail('peer\\override-capability')
TestFlag.okfail('peer\\passive')
TestFlag.okfail('peer\\password')
TestFlag.okfail('peer\\shutdown')
TestFlag.okfail('peer\\strict-capability-match')
TestFlag.okfail('peer\\timers')
TestFlag.okfail('peer\\timers connect')
TestFlag.okfail('peer\\update-source')
TestFlag.okfail('peer\\update-source')
TestFlag.okfail('peer\\ipv4-unicast\\addpath')
TestFlag.okfail('peer\\ipv4-multicast\\addpath')
TestFlag.okfail('peer\\ipv6-unicast\\addpath')
TestFlag.okfail('peer\\ipv6-multicast\\addpath')
TestFlag.okfail('peer\\ipv4-unicast\\allowas-in')
TestFlag.okfail('peer\\ipv4-multicast\\allowas-in')
TestFlag.okfail('peer\\ipv6-unicast\\allowas-in')
TestFlag.okfail('peer\\ipv6-multicast\\allowas-in')
TestFlag.okfail('peer\\ipv4-unicast\\allowas-in origin')
TestFlag.okfail('peer\\ipv4-multicast\\allowas-in origin')
TestFlag.okfail('peer\\ipv6-unicast\\allowas-in origin')
TestFlag.okfail('peer\\ipv6-multicast\\allowas-in origin')
TestFlag.okfail('peer\\ipv4-unicast\\as-override')
TestFlag.okfail('peer\\ipv4-multicast\\as-override')
TestFlag.okfail('peer\\ipv6-unicast\\as-override')
TestFlag.okfail('peer\\ipv6-multicast\\as-override')
TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path')
TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path')
TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path')
TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path')
TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged next-hop')
TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged next-hop')
TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged next-hop')
TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged next-hop')
TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged med')
TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged med')
TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged med')
TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged med')
TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path next-hop')
TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path next-hop')
TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path next-hop')
TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path next-hop')
TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path med')
TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path med')
TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path med')
TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path med')
TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path next-hop med')
TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path next-hop med')
TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path next-hop med')
TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path next-hop med')
TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list send')
TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list send')
TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list send')
TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list send')
TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list receive')
TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list receive')
TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list receive')
TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list receive')
TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list both')
TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list both')
TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list both')
TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list both')
TestFlag.okfail('peer\\ipv4-unicast\\default-originate')
TestFlag.okfail('peer\\ipv4-multicast\\default-originate')
TestFlag.okfail('peer\\ipv6-unicast\\default-originate')
TestFlag.okfail('peer\\ipv6-multicast\\default-originate')
TestFlag.okfail('peer\\ipv4-unicast\\default-originate route-map')
TestFlag.okfail('peer\\ipv4-multicast\\default-originate route-map')
TestFlag.okfail('peer\\ipv6-unicast\\default-originate route-map')
TestFlag.okfail('peer\\ipv6-multicast\\default-originate route-map')
TestFlag.okfail('peer\\ipv4-unicast\\distribute-list')
TestFlag.okfail('peer\\ipv4-multicast\\distribute-list')
TestFlag.okfail('peer\\ipv6-unicast\\distribute-list')
TestFlag.okfail('peer\\ipv6-multicast\\distribute-list')
TestFlag.okfail('peer\\ipv4-unicast\\distribute-list')
TestFlag.okfail('peer\\ipv4-multicast\\distribute-list')
TestFlag.okfail('peer\\ipv6-unicast\\distribute-list')
TestFlag.okfail('peer\\ipv6-multicast\\distribute-list')
TestFlag.okfail('peer\\ipv4-unicast\\filter-list')
TestFlag.okfail('peer\\ipv4-multicast\\filter-list')
TestFlag.okfail('peer\\ipv6-unicast\\filter-list')
TestFlag.okfail('peer\\ipv6-multicast\\filter-list')
TestFlag.okfail('peer\\ipv4-unicast\\filter-list')
TestFlag.okfail('peer\\ipv4-multicast\\filter-list')
TestFlag.okfail('peer\\ipv6-unicast\\filter-list')
TestFlag.okfail('peer\\ipv6-multicast\\filter-list')
TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
TestFlag.okfail('peer\\ipv4-unicast\\next-hop-self')
TestFlag.okfail('peer\\ipv4-multicast\\next-hop-self')
TestFlag.okfail('peer\\ipv6-unicast\\next-hop-self')
TestFlag.okfail('peer\\ipv6-multicast\\next-hop-self')
TestFlag.okfail('peer\\ipv4-unicast\\next-hop-self force')
TestFlag.okfail('peer\\ipv4-multicast\\next-hop-self force')
TestFlag.okfail('peer\\ipv6-unicast\\next-hop-self force')
TestFlag.okfail('peer\\ipv6-multicast\\next-hop-self force')
TestFlag.okfail('peer\\ipv4-unicast\\prefix-list')
TestFlag.okfail('peer\\ipv4-multicast\\prefix-list')
TestFlag.okfail('peer\\ipv6-unicast\\prefix-list')
TestFlag.okfail('peer\\ipv6-multicast\\prefix-list')
TestFlag.okfail('peer\\ipv4-unicast\\prefix-list')
TestFlag.okfail('peer\\ipv4-multicast\\prefix-list')
TestFlag.okfail('peer\\ipv6-unicast\\prefix-list')
TestFlag.okfail('peer\\ipv6-multicast\\prefix-list')
TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS')
TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS')
TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS')
TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS')
TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS all')
TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS all')
TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS all')
TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS all')
TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS replace-AS')
TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS replace-AS')
TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS replace-AS')
TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS replace-AS')
TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS all replace-AS')
TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS all replace-AS')
TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS all replace-AS')
TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS all replace-AS')
TestFlag.okfail('peer\\ipv4-unicast\\route-map')
TestFlag.okfail('peer\\ipv4-multicast\\route-map')
TestFlag.okfail('peer\\ipv6-unicast\\route-map')
TestFlag.okfail('peer\\ipv6-multicast\\route-map')
TestFlag.okfail('peer\\ipv4-unicast\\route-map')
TestFlag.okfail('peer\\ipv4-multicast\\route-map')
TestFlag.okfail('peer\\ipv6-unicast\\route-map')
TestFlag.okfail('peer\\ipv6-multicast\\route-map')
TestFlag.okfail('peer\\ipv4-unicast\\route-reflector-client')
TestFlag.okfail('peer\\ipv4-multicast\\route-reflector-client')
TestFlag.okfail('peer\\ipv6-unicast\\route-reflector-client')
TestFlag.okfail('peer\\ipv6-multicast\\route-reflector-client')
TestFlag.okfail('peer\\ipv4-unicast\\route-server-client')
TestFlag.okfail('peer\\ipv4-multicast\\route-server-client')
TestFlag.okfail('peer\\ipv6-unicast\\route-server-client')
TestFlag.okfail('peer\\ipv6-multicast\\route-server-client')
TestFlag.okfail('peer\\ipv4-unicast\\send-community')
TestFlag.okfail('peer\\ipv4-multicast\\send-community')
TestFlag.okfail('peer\\ipv6-unicast\\send-community')
TestFlag.okfail('peer\\ipv6-multicast\\send-community')
TestFlag.okfail('peer\\ipv4-unicast\\send-community extended')
TestFlag.okfail('peer\\ipv4-multicast\\send-community extended')
TestFlag.okfail('peer\\ipv6-unicast\\send-community extended')
TestFlag.okfail('peer\\ipv6-multicast\\send-community extended')
TestFlag.okfail('peer\\ipv4-unicast\\send-community large')
TestFlag.okfail('peer\\ipv4-multicast\\send-community large')
TestFlag.okfail('peer\\ipv6-unicast\\send-community large')
TestFlag.okfail('peer\\ipv6-multicast\\send-community large')
|
||||
TestFlag.okfail('peer\\ipv4-unicast\\soft-reconfiguration inbound')
|
||||
TestFlag.okfail('peer\\ipv4-multicast\\soft-reconfiguration inbound')
|
||||
TestFlag.okfail('peer\\ipv6-unicast\\soft-reconfiguration inbound')
|
||||
TestFlag.okfail('peer\\ipv6-multicast\\soft-reconfiguration inbound')
|
||||
TestFlag.okfail('peer\\ipv4-unicast\\unsuppress-map')
|
||||
TestFlag.okfail('peer\\ipv4-multicast\\unsuppress-map')
|
||||
TestFlag.okfail('peer\\ipv6-unicast\\unsuppress-map')
|
||||
TestFlag.okfail('peer\\ipv6-multicast\\unsuppress-map')
|
||||
TestFlag.okfail('peer\\ipv4-unicast\\weight')
|
||||
TestFlag.okfail('peer\\ipv4-multicast\\weight')
|
||||
TestFlag.okfail('peer\\ipv6-unicast\\weight')
|
||||
TestFlag.okfail('peer\\ipv6-multicast\\weight')
|
||||
TestFlag.okfail("peer\\advertisement-interval")
|
||||
TestFlag.okfail("peer\\capability dynamic")
|
||||
TestFlag.okfail("peer\\capability extended-nexthop")
|
||||
# TestFlag.okfail('peer\\capability extended-nexthop')
|
||||
TestFlag.okfail("peer\\description")
|
||||
TestFlag.okfail("peer\\disable-connected-check")
|
||||
TestFlag.okfail("peer\\dont-capability-negotiate")
|
||||
TestFlag.okfail("peer\\enforce-first-as")
|
||||
TestFlag.okfail("peer\\local-as")
|
||||
TestFlag.okfail("peer\\local-as 1 no-prepend")
|
||||
TestFlag.okfail("peer\\local-as 1 no-prepend replace-as")
|
||||
TestFlag.okfail("peer\\override-capability")
|
||||
TestFlag.okfail("peer\\passive")
|
||||
TestFlag.okfail("peer\\password")
|
||||
TestFlag.okfail("peer\\shutdown")
|
||||
TestFlag.okfail("peer\\strict-capability-match")
|
||||
TestFlag.okfail("peer\\timers")
|
||||
TestFlag.okfail("peer\\timers connect")
|
||||
TestFlag.okfail("peer\\update-source")
|
||||
TestFlag.okfail("peer\\update-source")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\addpath")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\addpath")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\addpath")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\addpath")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\allowas-in")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\allowas-in")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\allowas-in")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\allowas-in")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\allowas-in origin")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\allowas-in origin")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\allowas-in origin")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\allowas-in origin")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\as-override")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\as-override")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\as-override")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\as-override")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged next-hop")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged next-hop")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged next-hop")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged next-hop")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged med")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged med")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged med")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged med")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path next-hop")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path next-hop")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path next-hop")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path next-hop")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path med")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path med")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path med")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path med")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\attribute-unchanged as-path next-hop med")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\attribute-unchanged as-path next-hop med")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\attribute-unchanged as-path next-hop med")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\attribute-unchanged as-path next-hop med")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\capability orf prefix-list send")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\capability orf prefix-list send")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\capability orf prefix-list send")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\capability orf prefix-list send")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\capability orf prefix-list receive")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\capability orf prefix-list receive")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\capability orf prefix-list receive")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\capability orf prefix-list receive")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\capability orf prefix-list both")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\capability orf prefix-list both")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\capability orf prefix-list both")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\capability orf prefix-list both")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\default-originate")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\default-originate")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\default-originate")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\default-originate")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\default-originate route-map")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\default-originate route-map")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\default-originate route-map")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\default-originate route-map")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\distribute-list")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\filter-list")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\maximum-prefix")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\next-hop-self")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\next-hop-self")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\next-hop-self")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\next-hop-self")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\next-hop-self force")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\next-hop-self force")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\next-hop-self force")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\next-hop-self force")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\prefix-list")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS all")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS all")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS all")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS all")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS replace-AS")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS replace-AS")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS replace-AS")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS replace-AS")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\remove-private-AS all replace-AS")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\remove-private-AS all replace-AS")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\remove-private-AS all replace-AS")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\remove-private-AS all replace-AS")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\route-map")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\route-reflector-client")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\route-reflector-client")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\route-reflector-client")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\route-reflector-client")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\route-server-client")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\route-server-client")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\route-server-client")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\route-server-client")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\send-community")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\send-community")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\send-community")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\send-community")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\send-community extended")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\send-community extended")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\send-community extended")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\send-community extended")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\send-community large")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\send-community large")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\send-community large")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\send-community large")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\soft-reconfiguration inbound")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\soft-reconfiguration inbound")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\soft-reconfiguration inbound")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\soft-reconfiguration inbound")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\unsuppress-map")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\unsuppress-map")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\unsuppress-map")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\unsuppress-map")
|
||||
TestFlag.okfail("peer\\ipv4-unicast\\weight")
|
||||
TestFlag.okfail("peer\\ipv4-multicast\\weight")
|
||||
TestFlag.okfail("peer\\ipv6-unicast\\weight")
|
||||
TestFlag.okfail("peer\\ipv6-multicast\\weight")
|
||||
|
@ -29,24 +29,29 @@ import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3


def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""

    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        slots = orig_vars.get("__slots__")
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        orig_vars.pop("__dict__", None)
        orig_vars.pop("__weakref__", None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)

    return wrapper


if PY3:
    import builtins

    exec_ = getattr(builtins,'exec')
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        try:
@ -59,7 +64,9 @@ if PY3:
            value = None
            tb = None


else:

    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
@ -72,9 +79,11 @@ else:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec_("""def reraise(tp, value, tb=None):
    exec_(
        """def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""")
"""
    )
@ -39,35 +39,41 @@ import frrsix
srcbase = os.path.abspath(inspect.getsourcefile(frrsix))
for i in range(0, 3):
    srcbase = os.path.dirname(srcbase)


def binpath(srcpath):
    return os.path.relpath(os.path.abspath(srcpath), srcbase)


class MultiTestFailure(Exception):
    pass


class MetaTestMultiOut(type):
    def __getattr__(cls, name):
        if name.startswith('_'):
        if name.startswith("_"):
            raise AttributeError

        internal_name = '_{}'.format(name)
        internal_name = "_{}".format(name)
        if internal_name not in dir(cls):
            raise AttributeError

        def registrar(*args, **kwargs):
            cls._add_test(getattr(cls,internal_name), *args, **kwargs)
            cls._add_test(getattr(cls, internal_name), *args, **kwargs)

        return registrar


@frrsix.add_metaclass(MetaTestMultiOut)
class _TestMultiOut(object):
    def _run_tests(self):
        if 'tests_run' in dir(self.__class__) and self.tests_run:
        if "tests_run" in dir(self.__class__) and self.tests_run:
            return
        self.__class__.tests_run = True
        basedir = os.path.dirname(inspect.getsourcefile(type(self)))
        program = os.path.join(basedir, self.program)
        proc = subprocess.Popen([binpath(program)], stdout=subprocess.PIPE)
        self.output,_ = proc.communicate('')
        self.output, _ = proc.communicate("")
        self.exitcode = proc.wait()

        self.__class__.testresults = {}
@ -85,13 +91,14 @@ class _TestMultiOut(object):

    @classmethod
    def _add_test(cls, method, *args, **kwargs):
        if 'tests' not in dir(cls):
            setattr(cls,'tests',[])
        if "tests" not in dir(cls):
            setattr(cls, "tests", [])
            if method is not cls._exit_cleanly:
                cls._add_test(cls._exit_cleanly)

        def matchfunction(self):
            method(self, *args, **kwargs)

        cls.tests.append(matchfunction)

        def testfunction(self):
@ -100,17 +107,18 @@ class _TestMultiOut(object):
            if result is not None:
                frrsix.reraise(*result)

        testname = re.sub(r'[^A-Za-z0-9]', '_', '%r%r' % (args, kwargs))
        testname = re.sub(r'__*', '_', testname)
        testname = testname.strip('_')
        testname = re.sub(r"[^A-Za-z0-9]", "_", "%r%r" % (args, kwargs))
        testname = re.sub(r"__*", "_", testname)
        testname = testname.strip("_")
        if not testname:
            testname = method.__name__.strip('_')
            testname = method.__name__.strip("_")
        if "test_%s" % testname in dir(cls):
            index = 2
            while "test_%s_%d" % (testname,index) in dir(cls):
            while "test_%s_%d" % (testname, index) in dir(cls):
                index += 1
            testname = "%s_%d" % (testname, index)
        setattr(cls,"test_%s" % testname, testfunction)
        setattr(cls, "test_%s" % testname, testfunction)


#
# This class houses the actual TestMultiOut tests types.
@ -127,15 +135,16 @@ class _TestMultiOut(object):
# modified according to consumed content.
#

re_okfail = re.compile(r'(?:[3[12]m|^)?(?P<ret>OK|failed)'.encode('utf8'),
                       re.MULTILINE)
re_okfail = re.compile(r"(?:[3[12]m|^)?(?P<ret>OK|failed)".encode("utf8"), re.MULTILINE)


class TestMultiOut(_TestMultiOut):
    def _onesimple(self, line):
        if type(line) is str:
            line = line.encode('utf8')
            line = line.encode("utf8")
        idx = self.output.find(line)
        if idx != -1:
            self.output = self.output[idx+len(line):]
            self.output = self.output[idx + len(line) :]
        else:
            raise MultiTestFailure("%r could not be found" % line)

@ -144,58 +153,67 @@ class TestMultiOut(_TestMultiOut):

        m = okfail.search(self.output)
        if m is None:
            raise MultiTestFailure('OK/fail not found')
        self.output = self.output[m.end():]
            raise MultiTestFailure("OK/fail not found")
        self.output = self.output[m.end() :]

        if m.group("ret") != "OK".encode("utf8"):
            raise MultiTestFailure("Test output indicates failure")

        if m.group('ret') != 'OK'.encode('utf8'):
            raise MultiTestFailure('Test output indicates failure')


#
# This class implements a test comparing the output of a program against
# an existing reference output
#


class TestRefMismatch(Exception):
    def __init__(self, _test, outtext, reftext):
        self.outtext = outtext.decode('utf8') if type(outtext) is bytes else outtext
        self.reftext = reftext.decode('utf8') if type(reftext) is bytes else reftext
        self.outtext = outtext.decode("utf8") if type(outtext) is bytes else outtext
        self.reftext = reftext.decode("utf8") if type(reftext) is bytes else reftext

    def __str__(self):
        rv = 'Expected output and actual output differ:\n'
        rv += '\n'.join(difflib.unified_diff(self.reftext.splitlines(),
        rv = "Expected output and actual output differ:\n"
        rv += "\n".join(
            difflib.unified_diff(
                self.reftext.splitlines(),
                self.outtext.splitlines(),
                'outtext', 'reftext',
                lineterm=''))
                "outtext",
                "reftext",
                lineterm="",
            )
        )
        return rv


class TestExitNonzero(Exception):
    pass


class TestRefOut(object):
    def test_refout(self):
        basedir = os.path.dirname(inspect.getsourcefile(type(self)))
        program = os.path.join(basedir, self.program)

        if getattr(self, 'built_refin', False):
            refin = binpath(program) + '.in'
        if getattr(self, "built_refin", False):
            refin = binpath(program) + ".in"
        else:
            refin = program + '.in'
        if getattr(self, 'built_refout', False):
            refout = binpath(program) + '.refout'
            refin = program + ".in"
        if getattr(self, "built_refout", False):
            refout = binpath(program) + ".refout"
        else:
            refout = program + '.refout'
            refout = program + ".refout"

        intext = ''
        intext = ""
        if os.path.exists(refin):
            with open(refin, 'rb') as f:
            with open(refin, "rb") as f:
                intext = f.read()
        with open(refout, 'rb') as f:
        with open(refout, "rb") as f:
            reftext = f.read()

        proc = subprocess.Popen([binpath(program)],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        outtext,_ = proc.communicate(intext)
        proc = subprocess.Popen(
            [binpath(program)], stdin=subprocess.PIPE, stdout=subprocess.PIPE
        )
        outtext, _ = proc.communicate(intext)
        if outtext != reftext:
            raise TestRefMismatch(self, outtext, reftext)
        if proc.wait() != 0:
@ -9,18 +9,24 @@ import socket
# on musl, ntop compresses a single :0: -> :: which is against RFC
##
def inet_ntop_broken():
    addr = '1:2:3:4:0:6:7:8'
    return socket.inet_ntop(socket.AF_INET6,
                            socket.inet_pton(socket.AF_INET6, addr)) != addr
    addr = "1:2:3:4:0:6:7:8"
    return (
        socket.inet_ntop(socket.AF_INET6, socket.inet_pton(socket.AF_INET6, addr))
        != addr
    )


if platform.uname()[0] == 'SunOS' or inet_ntop_broken():
if platform.uname()[0] == "SunOS" or inet_ntop_broken():

    class TestFuzzIsisTLV:
        @pytest.mark.skipif(True, reason='Test unsupported')
        @pytest.mark.skipif(True, reason="Test unsupported")
        def test_exit_cleanly(self):
            pass


else:

    class TestFuzzIsisTLV(frrtest.TestMultiOut):
        program = './test_fuzz_isis_tlv'
        program = "./test_fuzz_isis_tlv"

    TestFuzzIsisTLV.exit_cleanly()
@ -1,6 +1,8 @@
import frrtest


class TestIsisLSPDB(frrtest.TestMultiOut):
    program = './test_isis_lspdb'
    program = "./test_isis_lspdb"


TestIsisLSPDB.exit_cleanly()

@ -1,4 +1,5 @@
import frrtest


class TestIsisSPF(frrtest.TestRefOut):
    program = './test_isis_spf'
    program = "./test_isis_spf"

@ -1,6 +1,8 @@
import frrtest


class TestIsisVertexQueue(frrtest.TestMultiOut):
    program = './test_isis_vertex_queue'
    program = "./test_isis_vertex_queue"


TestIsisVertexQueue.exit_cleanly()

@ -1,5 +1,6 @@
import frrtest


class TestCli(frrtest.TestRefOut):
    program = './test_cli'
    program = "./test_cli"
    built_refout = True

@ -2,10 +2,12 @@ import frrtest
import pytest
import os

class TestCommands(frrtest.TestRefOut):
    program = './test_commands'

    @pytest.mark.skipif('QUAGGA_TEST_COMMANDS' not in os.environ,
                        reason='QUAGGA_TEST_COMMANDS not set')

class TestCommands(frrtest.TestRefOut):
    program = "./test_commands"

    @pytest.mark.skipif(
        "QUAGGA_TEST_COMMANDS" not in os.environ, reason="QUAGGA_TEST_COMMANDS not set"
    )
    def test_refout(self):
        return super(TestCommands, self).test_refout()

@ -1,4 +1,5 @@
import frrtest


class TestNbOperData(frrtest.TestRefOut):
    program = './test_oper_data'
    program = "./test_oper_data"

@ -1,6 +1,8 @@
import frrtest


class TestAtomlist(frrtest.TestMultiOut):
    program = './test_atomlist'
    program = "./test_atomlist"


TestAtomlist.exit_cleanly()

@ -1,4 +1,5 @@
import frrtest


class TestGraph(frrtest.TestRefOut):
    program = './test_graph'
    program = "./test_graph"

@ -1,6 +1,8 @@
import frrtest

class TestIDAlloc(frrtest.TestMultiOut):
    program = './test_idalloc'

TestIDAlloc.onesimple('ID Allocator test successful.')

class TestIDAlloc(frrtest.TestMultiOut):
    program = "./test_idalloc"


TestIDAlloc.onesimple("ID Allocator test successful.")

@ -1,7 +1,9 @@
import frrtest

class TestNexthopIter(frrtest.TestMultiOut):
    program = './test_nexthop_iter'

TestNexthopIter.onesimple('Simple test passed.')
TestNexthopIter.onesimple('PRNG test passed.')

class TestNexthopIter(frrtest.TestMultiOut):
    program = "./test_nexthop_iter"


TestNexthopIter.onesimple("Simple test passed.")
TestNexthopIter.onesimple("PRNG test passed.")

@ -1,6 +1,8 @@
import frrtest


class TestNtop(frrtest.TestMultiOut):
    program = './test_ntop'
    program = "./test_ntop"


TestNtop.exit_cleanly()

@ -1,6 +1,8 @@
import frrtest


class TestPrefix2str(frrtest.TestMultiOut):
    program = './test_prefix2str'
    program = "./test_prefix2str"


TestPrefix2str.exit_cleanly()

@ -1,6 +1,8 @@
import frrtest


class TestPrintfrr(frrtest.TestMultiOut):
    program = './test_printfrr'
    program = "./test_printfrr"


TestPrintfrr.exit_cleanly()

@ -1,6 +1,8 @@
import frrtest


class TestRingbuf(frrtest.TestMultiOut):
    program = './test_ringbuf'
    program = "./test_ringbuf"


TestRingbuf.exit_cleanly()

@ -1,6 +1,8 @@
import frrtest

class TestSrcdestTable(frrtest.TestMultiOut):
    program = './test_srcdest_table'

TestSrcdestTable.onesimple('PRNG Test successful.')

class TestSrcdestTable(frrtest.TestMultiOut):
    program = "./test_srcdest_table"


TestSrcdestTable.onesimple("PRNG Test successful.")

@ -1,4 +1,5 @@
import frrtest


class TestStream(frrtest.TestRefOut):
    program = './test_stream'
    program = "./test_stream"

@ -1,10 +1,12 @@
import frrtest


class TestTable(frrtest.TestMultiOut):
    program = './test_table'
    program = "./test_table"


for i in range(6):
    TestTable.onesimple('Verifying cmp')
    TestTable.onesimple("Verifying cmp")
for i in range(11):
    TestTable.onesimple('Verifying successor')
TestTable.onesimple('Verified pausing')
    TestTable.onesimple("Verifying successor")
TestTable.onesimple("Verified pausing")

@ -1,6 +1,8 @@
import frrtest

class TestTimerCorrectness(frrtest.TestMultiOut):
    program = './test_timer_correctness'

TestTimerCorrectness.onesimple('Expected output and actual output match.')

class TestTimerCorrectness(frrtest.TestMultiOut):
    program = "./test_timer_correctness"


TestTimerCorrectness.onesimple("Expected output and actual output match.")

@ -1,4 +1,5 @@
import frrtest


class TestTTable(frrtest.TestRefOut):
    program = './test_ttable'
    program = "./test_ttable"

@ -1,19 +1,21 @@
import frrtest

class TestTypelist(frrtest.TestMultiOut):
    program = './test_typelist'

TestTypelist.onesimple('LIST end')
TestTypelist.onesimple('DLIST end')
TestTypelist.onesimple('ATOMLIST end')
TestTypelist.onesimple('HEAP end')
TestTypelist.onesimple('SORTLIST_UNIQ end')
TestTypelist.onesimple('SORTLIST_NONUNIQ end')
TestTypelist.onesimple('HASH end')
TestTypelist.onesimple('HASH_collisions end')
TestTypelist.onesimple('SKIPLIST_UNIQ end')
TestTypelist.onesimple('SKIPLIST_NONUNIQ end')
TestTypelist.onesimple('RBTREE_UNIQ end')
TestTypelist.onesimple('RBTREE_NONUNIQ end')
TestTypelist.onesimple('ATOMSORT_UNIQ end')
TestTypelist.onesimple('ATOMSORT_NONUNIQ end')

class TestTypelist(frrtest.TestMultiOut):
    program = "./test_typelist"


TestTypelist.onesimple("LIST end")
TestTypelist.onesimple("DLIST end")
TestTypelist.onesimple("ATOMLIST end")
TestTypelist.onesimple("HEAP end")
TestTypelist.onesimple("SORTLIST_UNIQ end")
TestTypelist.onesimple("SORTLIST_NONUNIQ end")
TestTypelist.onesimple("HASH end")
TestTypelist.onesimple("HASH_collisions end")
TestTypelist.onesimple("SKIPLIST_UNIQ end")
TestTypelist.onesimple("SKIPLIST_NONUNIQ end")
TestTypelist.onesimple("RBTREE_UNIQ end")
TestTypelist.onesimple("RBTREE_NONUNIQ end")
TestTypelist.onesimple("ATOMSORT_UNIQ end")
TestTypelist.onesimple("ATOMSORT_NONUNIQ end")

@ -1,6 +1,8 @@
import frrtest


class TestVersionCmp(frrtest.TestMultiOut):
    program = './test_versioncmp'
    program = "./test_versioncmp"


TestVersionCmp.exit_cleanly()

@ -1,4 +1,5 @@
import frrtest


class TestZlog(frrtest.TestMultiOut):
    program = './test_zlog'
    program = "./test_zlog"

@ -2,10 +2,13 @@ import frrtest
import pytest
import os

class TestZMQ(frrtest.TestRefOut):
    program = './test_zmq'

    @pytest.mark.skipif('S["ZEROMQ_TRUE"]=""\n' not in open('../config.status').readlines(),
                        reason='ZEROMQ not enabled')

class TestZMQ(frrtest.TestRefOut):
    program = "./test_zmq"

    @pytest.mark.skipif(
        'S["ZEROMQ_TRUE"]=""\n' not in open("../config.status").readlines(),
        reason="ZEROMQ not enabled",
    )
    def test_refout(self):
        return super(TestZMQ, self).test_refout()

@ -1,4 +1,5 @@
import frrtest


class TestLSDB(frrtest.TestRefOut):
    program = './test_lsdb'
    program = "./test_lsdb"

@ -2,5 +2,5 @@ import pytest
import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), 'helpers','python'))
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers", "python"))
raise SystemExit(pytest.main(sys.argv[1:]))
File diff suppressed because it is too large
@ -118,6 +118,7 @@ def teardown_module(_mod):
    tgen = get_topogen()
    tgen.stop_topology()


def test_wait_protocols_convergence():
    "Wait for all protocols to converge"
    tgen = get_topogen()
@ -128,41 +129,40 @@ def test_wait_protocols_convergence():

    def expect_loopback_route(router, iptype, route, proto):
        "Wait until route is present on RIB for protocol."
        logger.info('waiting route {} in {}'.format(route, router))
        logger.info("waiting route {} in {}".format(route, router))
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            'show {} route json'.format(iptype),
            { route: [{ 'protocol': proto }] }
            "show {} route json".format(iptype),
            {route: [{"protocol": proto}]},
        )
        _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
        assertmsg = '"{}" OSPF convergence failure'.format(router)
        assert result is None, assertmsg

    # Wait for R1 <-> R6 convergence.
    expect_loopback_route('r1', 'ip', '10.254.254.6/32', 'ospf')
    expect_loopback_route("r1", "ip", "10.254.254.6/32", "ospf")

    # Wait for R6 <-> R1 convergence.
    expect_loopback_route('r6', 'ip', '10.254.254.1/32', 'ospf')
    expect_loopback_route("r6", "ip", "10.254.254.1/32", "ospf")

    # Wait for R2 <-> R3 convergence.
    expect_loopback_route('r2', 'ip', '10.254.254.3/32', 'bgp')
    expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp")

    # Wait for R3 <-> R2 convergence.
    expect_loopback_route('r3', 'ip', '10.254.254.2/32', 'bgp')
    expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp")

    # Wait for R3 <-> R4 convergence.
    expect_loopback_route('r3', 'ipv6', '2001:db8:3::/64', 'isis')
    expect_loopback_route("r3", "ipv6", "2001:db8:3::/64", "isis")

    # Wait for R4 <-> R3 convergence.
    expect_loopback_route('r4', 'ipv6', '2001:db8:1::/64', 'isis')
    expect_loopback_route("r4", "ipv6", "2001:db8:1::/64", "isis")

    # Wait for R4 <-> R5 convergence.
    expect_loopback_route('r4', 'ipv6', '2001:db8:3::/64', 'ospf6')
    expect_loopback_route("r4", "ipv6", "2001:db8:3::/64", "ospf6")

    # Wait for R5 <-> R4 convergence.
    expect_loopback_route('r5', 'ipv6', '2001:db8:2::/64', 'ospf6')
    expect_loopback_route("r5", "ipv6", "2001:db8:2::/64", "ospf6")


def test_bfd_profile_values():
@ -103,44 +103,44 @@ def test_wait_bgp_convergence():

    def expect_loopback_route(router, iptype, route, proto):
        "Wait until route is present on RIB for protocol."
        logger.info('waiting route {} in {}'.format(route, router))
        logger.info("waiting route {} in {}".format(route, router))
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            'show {} route json'.format(iptype),
            { route: [{ 'protocol': proto }] }
            "show {} route json".format(iptype),
            {route: [{"protocol": proto}]},
        )
        _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
        assertmsg = '"{}" OSPF convergence failure'.format(router)
        assert result is None, assertmsg

    # Wait for R1 <-> R2 convergence.
    expect_loopback_route('r1', 'ip', '10.254.254.2/32', 'bgp')
    expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp")
    # Wait for R1 <-> R3 convergence.
    expect_loopback_route('r1', 'ip', '10.254.254.3/32', 'bgp')
    expect_loopback_route("r1", "ip", "10.254.254.3/32", "bgp")
    # Wait for R1 <-> R4 convergence.
    expect_loopback_route('r1', 'ip', '10.254.254.4/32', 'bgp')
    expect_loopback_route("r1", "ip", "10.254.254.4/32", "bgp")

    # Wait for R2 <-> R1 convergence.
    expect_loopback_route('r2', 'ip', '10.254.254.1/32', 'bgp')
    expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp")
    # Wait for R2 <-> R3 convergence.
    expect_loopback_route('r2', 'ip', '10.254.254.3/32', 'bgp')
    expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp")
    # Wait for R2 <-> R4 convergence.
    expect_loopback_route('r2', 'ip', '10.254.254.4/32', 'bgp')
    expect_loopback_route("r2", "ip", "10.254.254.4/32", "bgp")

    # Wait for R3 <-> R1 convergence.
    expect_loopback_route('r3', 'ip', '10.254.254.1/32', 'bgp')
    expect_loopback_route("r3", "ip", "10.254.254.1/32", "bgp")
    # Wait for R3 <-> R2 convergence.
    expect_loopback_route('r3', 'ip', '10.254.254.2/32', 'bgp')
    expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp")
    # Wait for R3 <-> R4 convergence.
    expect_loopback_route('r3', 'ip', '10.254.254.4/32', 'bgp')
    expect_loopback_route("r3", "ip", "10.254.254.4/32", "bgp")

    # Wait for R4 <-> R1 convergence.
    expect_loopback_route('r4', 'ip', '10.254.254.1/32', 'bgp')
    expect_loopback_route("r4", "ip", "10.254.254.1/32", "bgp")
    # Wait for R4 <-> R2 convergence.
    expect_loopback_route('r4', 'ip', '10.254.254.2/32', 'bgp')
    expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp")
    # Wait for R4 <-> R3 convergence.
    expect_loopback_route('r4', 'ip', '10.254.254.3/32', 'bgp')
    expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp")


def test_wait_bfd_convergence():
@ -153,22 +153,22 @@ def test_wait_bfd_convergence():

    def expect_bfd_configuration(router):
        "Load JSON file and compare with 'show bfd peer json'"
        logger.info('waiting BFD configuration on router {}'.format(router))
        bfd_config = json.loads(open('{}/{}/bfd-peers.json'.format(CWD, router)).read())
        logger.info("waiting BFD configuration on router {}".format(router))
        bfd_config = json.loads(open("{}/{}/bfd-peers.json".format(CWD, router)).read())
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            'show bfd peers json',
            bfd_config
            "show bfd peers json",
            bfd_config,
        )
        _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
        assertmsg = '"{}" BFD configuration failure'.format(router)
        assert result is None, assertmsg

    expect_bfd_configuration('r1')
    expect_bfd_configuration('r2')
    expect_bfd_configuration('r3')
    expect_bfd_configuration('r4')
    expect_bfd_configuration("r1")
    expect_bfd_configuration("r2")
    expect_bfd_configuration("r3")
    expect_bfd_configuration("r4")


def teardown_module(_mod):
@ -76,7 +76,7 @@ from lib.common_config import (
    create_prefix_lists,
    create_route_maps,
    verify_bgp_community,
    required_linux_kernel_version
    required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import (
@ -139,7 +139,7 @@ def setup_module(mod):
    """

    # Required linux kernel version for this suite to run.
    result = required_linux_kernel_version('4.15')
    result = required_linux_kernel_version("4.15")
    if result is not True:
        pytest.skip("Kernel requirements are not met")

@ -567,7 +567,7 @@ def test_BGP_attributes_with_vrf_default_keyword_p0(request):
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    #reset_config_on_routers(tgen)
    # reset_config_on_routers(tgen)

    step("Configure static routes and redistribute in BGP on R3")
    for addr_type in ADDR_TYPES:
@ -61,7 +61,7 @@ from lib.common_config import (
    check_address_types,
    interface_status,
    reset_config_on_routers,
    required_linux_kernel_version
    required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp
@ -110,7 +110,7 @@ def setup_module(mod):
    global ADDR_TYPES

    # Required linux kernel version for this suite to run.
    result = required_linux_kernel_version('4.15')
    result = required_linux_kernel_version("4.15")
    if result is not True:
        pytest.skip("Kernel requirements are not met")

@ -144,9 +144,7 @@ def setup_module(mod):
    )

    link_data = [
        val
        for links, val in topo["routers"]["r2"]["links"].items()
        if "r3" in links
        val for links, val in topo["routers"]["r2"]["links"].items() if "r3" in links
    ]
    for adt in ADDR_TYPES:
        NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
@ -161,9 +159,7 @@ def setup_module(mod):
    INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))

    link_data = [
        val
        for links, val in topo["routers"]["r3"]["links"].items()
        if "r2" in links
        val for links, val in topo["routers"]["r3"]["links"].items() if "r2" in links
    ]
    INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
    INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))

@ -61,7 +61,7 @@ from lib.common_config import (
    check_address_types,
    interface_status,
    reset_config_on_routers,
    required_linux_kernel_version
    required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp
@ -110,7 +110,7 @@ def setup_module(mod):
    global ADDR_TYPES

    # Required linux kernel version for this suite to run.
    result = required_linux_kernel_version('4.15')
    result = required_linux_kernel_version("4.15")
    if result is not True:
        pytest.skip("Kernel requirements are not met")

@ -145,9 +145,7 @@ def setup_module(mod):
    )

    link_data = [
        val
        for links, val in topo["routers"]["r2"]["links"].items()
        if "r3" in links
        val for links, val in topo["routers"]["r2"]["links"].items() if "r3" in links
    ]
    for adt in ADDR_TYPES:
        NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
@ -162,9 +160,7 @@ def setup_module(mod):
    INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))

    link_data = [
        val
        for links, val in topo["routers"]["r3"]["links"].items()
        if "r2" in links
        val for links, val in topo["routers"]["r3"]["links"].items() if "r2" in links
    ]
    INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
    INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
@ -57,13 +57,13 @@ from mininet.topo import Topo


class NetworkTopo(Topo):
    '''
    """
    EVPN Multihoming Topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    '''
    """

    def build(self, **_opts):
        "Build function"
@ -84,7 +84,6 @@ class NetworkTopo(Topo):
        # On main router
        # First switch is for a dummy interface (for local network)

        ##################### spine1 ########################
        # spine1-eth0 is connected to torm11-eth0
        switch = tgen.add_switch("sw1")
@ -178,38 +177,44 @@ class NetworkTopo(Topo):
##
#####################################################

tor_ips = {"torm11" : "192.168.100.15", \
           "torm12" : "192.168.100.16", \
           "torm21" : "192.168.100.17", \
           "torm22" : "192.168.100.18"}
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

svi_ips = {"torm11" : "45.0.0.2", \
           "torm12" : "45.0.0.3", \
           "torm21" : "45.0.0.4", \
           "torm22" : "45.0.0.5"}
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

tor_ips_rack_1 = {"torm11" : "192.168.100.15", \
                  "torm12" : "192.168.100.16"}
tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21" : "192.168.100.17", \
                  "torm22" : "192.168.100.18"}
tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}

host_es_map = {"hostd11" : "03:44:38:39:ff:ff:01:00:00:01",
               "hostd12" : "03:44:38:39:ff:ff:01:00:00:02",
               "hostd21" : "03:44:38:39:ff:ff:02:00:00:01",
               "hostd22" : "03:44:38:39:ff:ff:02:00:00:02"}

def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    '''
    """
    Used to setup bonds on the TORs and hosts for MH
    '''
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run("ip link set dev %s type bond ad_actor_system %s" %\
             (bond_name, bond_ad_sys_mac))
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
@ -225,15 +230,14 @@ def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
    node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
    node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
    node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s"\
             % bond_name)
    node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)


def config_mcast_tunnel_termination_device(node):
    '''
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic
    '''
    """
    node.run("ip link add dev ipmr-lo type dummy")
    node.run("ip link set dev ipmr-lo mtu 16000")
    node.run("ip link set dev ipmr-lo mode dormant")
@ -241,9 +245,9 @@ def config_mcast_tunnel_termination_device(node):


def config_bridge(node):
    '''
    """
    Create a VLAN aware bridge
    '''
    """
    node.run("ip link add dev bridge type bridge stp_state 0")
    node.run("ip link set dev bridge type bridge vlan_filtering 1")
    node.run("ip link set dev bridge mtu 9216")
@ -255,10 +259,10 @@ def config_bridge(node):


def config_vxlan(node, node_ip):
    '''
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.
    '''
    """
    node.run("ip link add dev vx-1000 type vxlan id 1000 dstport 4789")
    node.run("ip link set dev vx-1000 type vxlan nolearning")
    node.run("ip link set dev vx-1000 type vxlan local %s" % node_ip)
@ -279,9 +283,9 @@ def config_vxlan(node, node_ip):


def config_svi(node, svi_pip):
    '''
    """
    Create an SVI for VLAN 1000
    '''
    """
    node.run("ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q")
    node.run("ip addr add %s/24 dev vlan1000" % svi_pip)
    node.run("ip link set dev vlan1000 up")
@ -297,9 +301,9 @@ def config_svi(node, svi_pip):


def config_tor(tor_name, tor, tor_ip, svi_pip):
    '''
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EVPN-PE
    '''
    """
    # create a device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

@ -329,17 +333,19 @@ def config_tors(tgen, tors):
        tor = tgen.gears[tor_name]
        config_tor(tor_name, tor, tor_ips.get(tor_name), svi_ips.get(tor_name))


def compute_host_ip_mac(host_name):
    host_id = host_name.split("hostd")[1]
    host_ip = "45.0.0."+ host_id + "/24"
    host_ip = "45.0.0." + host_id + "/24"
    host_mac = "00:00:00:00:00:" + host_id

    return host_ip, host_mac


def config_host(host_name, host):
    '''
    """
    Create the dual-attached bond on host nodes for MH
    '''
    """
    bond_members = []
    bond_members.append(host_name + "-eth0")
    bond_members.append(host_name + "-eth1")
@ -407,9 +413,9 @@ def teardown_module(_mod):


def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    '''
    """
    Check if ES peers are setup correctly on local ESs
    '''
    """
    peer_ips = []
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_1
@ -432,9 +438,9 @@ def check_local_es(esi, vtep_ips, dut_name, down_vteps):


def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    '''
    """
    Verify list of PEs associated with a remote ES
    '''
    """
    remote_ips = []

    if "torm1" in dut_name:
@ -455,10 +461,11 @@ def check_remote_es(esi, vtep_ips, dut_name, down_vteps):

    return (esi, diff) if diff else None


def check_es(dut):
    '''
    """
    Verify list of PEs associated all ESs, local and remote
    '''
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es json")
    bgp_es_json = json.loads(bgp_es)

@ -490,10 +497,11 @@ def check_es(dut):

    return result if result else None


def check_one_es(dut, esi, down_vteps):
    '''
    """
    Verify list of PEs associated all ESs, local and remote
    '''
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi)
    es = json.loads(bgp_es)

@ -513,12 +521,13 @@ def check_one_es(dut, esi, down_vteps):

    return result


def test_evpn_es():
    '''
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    '''
    """

    tgen = get_topogen()

@ -534,11 +543,12 @@ def test_evpn_es():
    assert result is None, assertmsg
    # tgen.mininet_cli()


def test_evpn_ead_update():
    '''
    """
    Flap a host link on the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI
    '''
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
@ -580,10 +590,11 @@ def test_evpn_ead_update():

    # tgen.mininet_cli()


def check_mac(dut, vni, mac, m_type, esi, intf):
    '''
    """
    checks if mac is present and if destination matches the one provided
    '''
    """

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

@ -597,13 +608,14 @@ def check_mac(dut, vni, mac, m_type, esi, intf):

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)


def test_evpn_mac():
    '''
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    '''
    """

    tgen = get_topogen()

@ -646,6 +658,7 @@ def test_evpn_mac():
    assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
    assert result is None, assertmsg


if __name__ == "__main__":
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))
@@ -90,84 +90,36 @@ def test_vrf_route_leak():

# Test DONNA VRF.
expect = {
'10.0.0.0/24': [
{
'protocol': 'connected',
}
"10.0.0.0/24": [{"protocol": "connected",}],
"10.0.1.0/24": [
{"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
],
'10.0.1.0/24': [
{
'protocol': 'bgp',
'selected': True,
'nexthops': [
{
'fib': True
}
]
}
"10.0.2.0/24": [{"protocol": "connected"}],
"10.0.3.0/24": [
{"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
],
'10.0.2.0/24': [
{
'protocol': 'connected'
}
],
'10.0.3.0/24': [
{
'protocol': 'bgp',
'selected': True,
'nexthops': [
{
'fib': True
}
]
}
]
}

test_func = partial(
topotest.router_json_cmp, r1, 'show ip route vrf DONNA json', expect
topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
)
result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assert result, "BGP VRF DONNA check failed:\n{}".format(diff)

# Test EVA VRF.
expect = {
'10.0.0.0/24': [
{
'protocol': 'bgp',
'selected': True,
'nexthops': [
{
'fib': True
}
]
}
"10.0.0.0/24": [
{"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
],
'10.0.1.0/24': [
{
'protocol': 'connected',
}
"10.0.1.0/24": [{"protocol": "connected",}],
"10.0.2.0/24": [
{"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}
],
'10.0.2.0/24': [
{
'protocol': 'bgp',
'selected': True,
'nexthops': [
{
'fib': True
}
]
}
],
'10.0.3.0/24': [
{
'protocol': 'connected',
}
]
"10.0.3.0/24": [{"protocol": "connected",}],
}

test_func = partial(
topotest.router_json_cmp, r1, 'show ip route vrf EVA json', expect
topotest.router_json_cmp, r1, "show ip route vrf EVA json", expect
)
result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assert result, "BGP VRF EVA check failed:\n{}".format(diff)
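The test above leans on the polling idiom used throughout these topotests: bind the router, the show command, and the expected JSON into a callable with functools.partial, then let topotest.run_and_expect retry it until it returns the expected value (None on a JSON match) or the retries run out. A simplified sketch of that contract, not the real implementation, which lives in the topotest library:

    import time

    def run_and_expect(func, what, count=10, wait=0.5):
        # poll `func` until it returns `what`; report success and last result
        result = func()
        while result != what and count > 0:
            time.sleep(wait)
            result = func()
            count -= 1
        return (result == what, result)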
@@ -47,16 +47,17 @@ class BgpAggregateAddressTopo1(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)

r1 = tgen.add_router('r1')
r2 = tgen.add_router('r2')
peer1 = tgen.add_exabgp_peer('peer1', ip='10.0.0.2',
defaultRoute='via 10.0.0.1')
r1 = tgen.add_router("r1")
r2 = tgen.add_router("r2")
peer1 = tgen.add_exabgp_peer(
"peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
)

switch = tgen.add_switch('s1')
switch = tgen.add_switch("s1")
switch.add_link(r1)
switch.add_link(peer1)

switch = tgen.add_switch('s2')
switch = tgen.add_switch("s2")
switch.add_link(r1)
switch.add_link(r2)

@@ -65,17 +66,17 @@ def setup_module(mod):
tgen = Topogen(BgpAggregateAddressTopo1, mod.__name__)
tgen.start_topology()

router = tgen.gears['r1']
router = tgen.gears["r1"]
router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r1/zebra.conf"))
router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r1/bgpd.conf"))
router.start()

router = tgen.gears['r2']
router = tgen.gears["r2"]
router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r2/zebra.conf"))
router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r2/bgpd.conf"))
router.start()

peer = tgen.gears['peer1']
peer = tgen.gears["peer1"]
peer.start(os.path.join(CWD, "peer1"), os.path.join(CWD, "exabgp.env"))


@@ -92,21 +93,22 @@ def test_expect_convergence():
pytest.skip(tgen.errors)

logger.info("waiting for protocols to converge")

def expect_loopback_route(router, iptype, route, proto):
"Wait until route is present on RIB for protocol."
logger.info('waiting route {} in {}'.format(route, router))
logger.info("waiting route {} in {}".format(route, router))
test_func = functools.partial(
topotest.router_json_cmp,
tgen.gears[router],
'show {} route json'.format(iptype),
{ route: [{ 'protocol': proto }] }
"show {} route json".format(iptype),
{route: [{"protocol": proto}]},
)
_, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
assertmsg = '"{}" BGP convergence failure'.format(router)
assert result is None, assertmsg

expect_loopback_route('r2', 'ip', '10.254.254.1/32', 'bgp')
expect_loopback_route('r2', 'ip', '10.254.254.3/32', 'bgp')
expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp")
expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp")


def test_bgp_aggregate_address_matching_med_only():
@@ -122,19 +124,18 @@ def test_bgp_aggregate_address_matching_med_only():
"192.168.0.1/32": [{"protocol": "bgp", "metric": 10}],
"192.168.0.2/32": [{"protocol": "bgp", "metric": 10}],
"192.168.0.3/32": [{"protocol": "bgp", "metric": 10}],

# Non matching MED: aggregation must not exist.
"192.168.1.0/24": None,
"192.168.1.1/32": [{"protocol": "bgp", "metric": 10}],
"192.168.1.2/32": [{"protocol": "bgp", "metric": 10}],
"192.168.1.3/32": [{"protocol": "bgp", "metric": 20}]
"192.168.1.3/32": [{"protocol": "bgp", "metric": 20}],
}

test_func = functools.partial(
topotest.router_json_cmp,
tgen.gears['r2'],
'show ip route json',
routes_expected
tgen.gears["r2"],
"show ip route json",
routes_expected,
)
_, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"r2" BGP convergence failure'
@@ -148,7 +149,8 @@ def test_bgp_aggregate_address_match_and_supress():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

tgen.gears['r1'].vtysh_multicmd("""
tgen.gears["r1"].vtysh_multicmd(
"""
configure terminal
router bgp 65000
address-family ipv4 unicast
@@ -156,7 +158,8 @@ no aggregate-address 192.168.0.0/24 matching-MED-only
no aggregate-address 192.168.1.0/24 matching-MED-only
aggregate-address 192.168.0.0/24 matching-MED-only summary-only
aggregate-address 192.168.1.0/24 matching-MED-only summary-only
""")
"""
)

routes_expected = {
# All MED matches, aggregation must exist.
@@ -164,19 +167,18 @@ aggregate-address 192.168.1.0/24 matching-MED-only summary-only
"192.168.0.1/32": None,
"192.168.0.2/32": None,
"192.168.0.3/32": None,

# Non matching MED: aggregation must not exist.
"192.168.1.0/24": None,
"192.168.1.1/32": [{"protocol": "bgp", "metric": 10}],
"192.168.1.2/32": [{"protocol": "bgp", "metric": 10}],
"192.168.1.3/32": [{"protocol": "bgp", "metric": 20}]
"192.168.1.3/32": [{"protocol": "bgp", "metric": 20}],
}

test_func = functools.partial(
topotest.router_json_cmp,
tgen.gears['r2'],
'show ip route json',
routes_expected
tgen.gears["r2"],
"show ip route json",
routes_expected,
)
_, result = topotest.run_and_expect(test_func, None, count=120, wait=1)
assertmsg = '"r2" BGP convergence failure'
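One detail worth noting in the two expected-route dictionaries above: whenever black explodes a collection across several lines it also appends a trailing comma after the last element (the `routes_expected,` and `"192.168.1.3/32": ...,` changes), so future additions produce one-line diffs. It is presumably also why the stray comma survives inside the collapsed `{"protocol": "connected",}` with the black version used for this commit. A minimal before/after illustration, assuming that default behavior:

    # hand-written: no trailing comma after the last entry
    routes_expected = {
        "192.168.1.3/32": [{"protocol": "bgp", "metric": 20}]
    }

    # after black: the literal stays exploded and gains a trailing comma
    routes_expected = {
        "192.168.1.3/32": [{"protocol": "bgp", "metric": 20}],
    }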
@@ -65,7 +65,7 @@ from lib.common_config import (
create_route_maps,
check_address_types,
step,
required_linux_kernel_version
required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import (
@@ -114,7 +114,7 @@ def setup_module(mod):
"""

# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")
@@ -54,7 +54,7 @@ from lib.common_config import (
create_route_maps,
create_prefix_lists,
create_route_maps,
required_linux_kernel_version
required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import (
@@ -104,7 +104,7 @@ def setup_module(mod):
"""

# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")
@@ -122,27 +122,39 @@ def test_ebgp_requires_policy():

test_func = functools.partial(_bgp_converge, "r2")
success, result = topotest.run_and_expect(test_func, None, count=65, wait=2)
assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(tgen.gears["r2"])
assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(
tgen.gears["r2"]
)

test_func = functools.partial(_bgp_has_routes, "r2")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(tgen.gears["r2"])
assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(
tgen.gears["r2"]
)

test_func = functools.partial(_bgp_converge, "r4")
success, result = topotest.run_and_expect(test_func, None, count=65, wait=2)
assert success is True, 'Failed bgp convergence (r4) in "{}"'.format(tgen.gears["r4"])
assert success is True, 'Failed bgp convergence (r4) in "{}"'.format(
tgen.gears["r4"]
)

test_func = functools.partial(_bgp_has_routes, "r4")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(tgen.gears["r4"])
assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(
tgen.gears["r4"]
)

test_func = functools.partial(_bgp_converge, "r6")
success, result = topotest.run_and_expect(test_func, None, count=65, wait=2)
assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(tgen.gears["r6"])
assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(
tgen.gears["r6"]
)

test_func = functools.partial(_bgp_has_routes, "r6")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(tgen.gears["r6"])
assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(
tgen.gears["r6"]
)


if __name__ == "__main__":
@@ -35,7 +35,7 @@ import platform

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, '../'))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
@@ -47,26 +47,29 @@ from lib.topolog import logger
from mininet.topo import Topo

l3mdev_accept = 0
krel = ''
krel = ""


class BGPEVPNTopo(Topo):
"Test topology builder"

def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)

tgen.add_router('r1')
tgen.add_router('r2')
tgen.add_router("r1")
tgen.add_router("r2")

switch = tgen.add_switch('s1')
switch.add_link(tgen.gears['r1'])
switch.add_link(tgen.gears['r2'])
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])

switch = tgen.add_switch('s2')
switch.add_link(tgen.gears['r1'])
switch = tgen.add_switch("s2")
switch.add_link(tgen.gears["r1"])

switch = tgen.add_switch("s3")
switch.add_link(tgen.gears["r2"])

switch = tgen.add_switch('s3')
switch.add_link(tgen.gears['r2'])

def setup_module(mod):
"Sets up the pytest environment"
@@ -79,99 +82,109 @@ def setup_module(mod):
router_list = tgen.routers()

krel = platform.release()
if topotest.version_cmp(krel, '4.18') < 0:
logger.info('BGP EVPN RT5 NETNS tests will not run (have kernel "{}", but it requires 4.18)'.format(krel))
return pytest.skip('Skipping BGP EVPN RT5 NETNS Test. Kernel not supported')
if topotest.version_cmp(krel, "4.18") < 0:
logger.info(
'BGP EVPN RT5 NETNS tests will not run (have kernel "{}", but it requires 4.18)'.format(
krel
)
)
return pytest.skip("Skipping BGP EVPN RT5 NETNS Test. Kernel not supported")

l3mdev_accept = 1
logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept))
logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept))

# create VRF vrf-101 on R1 and R2
# create loop101
cmds_vrflite = ['sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept),
'ip link add {}-vrf-101 type vrf table 101',
'ip ru add oif {}-vrf-101 table 101',
'ip ru add iif {}-vrf-101 table 101',
'ip link set dev {}-vrf-101 up',
'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept),
'ip link add loop101 type dummy',
'ip link set dev loop101 master {}-vrf-101',
'ip link set dev loop101 up']
cmds_netns = ['ip netns add {}-vrf-101',
'ip link add loop101 type dummy',
'ip link set dev loop101 netns {}-vrf-101',
'ip netns exec {}-vrf-101 ip link set dev loop101 up']
cmds_vrflite = [
"sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept),
"ip link add {}-vrf-101 type vrf table 101",
"ip ru add oif {}-vrf-101 table 101",
"ip ru add iif {}-vrf-101 table 101",
"ip link set dev {}-vrf-101 up",
"sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept),
"ip link add loop101 type dummy",
"ip link set dev loop101 master {}-vrf-101",
"ip link set dev loop101 up",
]
cmds_netns = [
"ip netns add {}-vrf-101",
"ip link add loop101 type dummy",
"ip link set dev loop101 netns {}-vrf-101",
"ip netns exec {}-vrf-101 ip link set dev loop101 up",
]

cmds_r2 = [ # config routing 101
'ip link add name bridge-101 up type bridge stp_state 0',
'ip link set bridge-101 master {}-vrf-101',
'ip link set dev bridge-101 up',
'ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r2-eth0 local 192.168.100.41',
'ip link set dev vxlan-101 master bridge-101',
'ip link set vxlan-101 up type bridge_slave learning off flood off mcast_flood off']
"ip link add name bridge-101 up type bridge stp_state 0",
"ip link set bridge-101 master {}-vrf-101",
"ip link set dev bridge-101 up",
"ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r2-eth0 local 192.168.100.41",
"ip link set dev vxlan-101 master bridge-101",
"ip link set vxlan-101 up type bridge_slave learning off flood off mcast_flood off",
]

cmds_r1_netns_method3 = ['ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21',
'ip link set dev vxlan-{1} netns {0}-vrf-{1}',
'ip netns exec {0}-vrf-{1} ip li set dev lo up',
'ip netns exec {0}-vrf-{1} ip link add name bridge-{1} up type bridge stp_state 0',
'ip netns exec {0}-vrf-{1} ip link set dev vxlan-{1} master bridge-{1}',
'ip netns exec {0}-vrf-{1} ip link set bridge-{1} up',
'ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up']
cmds_r1_netns_method3 = [
"ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21",
"ip link set dev vxlan-{1} netns {0}-vrf-{1}",
"ip netns exec {0}-vrf-{1} ip li set dev lo up",
"ip netns exec {0}-vrf-{1} ip link add name bridge-{1} up type bridge stp_state 0",
"ip netns exec {0}-vrf-{1} ip link set dev vxlan-{1} master bridge-{1}",
"ip netns exec {0}-vrf-{1} ip link set bridge-{1} up",
"ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up",
]

router = tgen.gears['r1']
router = tgen.gears["r1"]
for cmd in cmds_netns:
logger.info('cmd to r1: '+cmd);
output = router.run(cmd.format('r1'))
logger.info('result: '+output);
logger.info("cmd to r1: " + cmd)
output = router.run(cmd.format("r1"))
logger.info("result: " + output)

router = tgen.gears['r2']
router = tgen.gears["r2"]
for cmd in cmds_vrflite:
logger.info('cmd to r2: '+cmd.format('r2'));
output = router.run(cmd.format('r2'))
logger.info('result: '+output);
logger.info("cmd to r2: " + cmd.format("r2"))
output = router.run(cmd.format("r2"))
logger.info("result: " + output)

for cmd in cmds_r2:
logger.info('cmd to r2: '+cmd.format('r2'));
output = router.run(cmd.format('r2'))
logger.info('result: '+output);
logger.info("cmd to r2: " + cmd.format("r2"))
output = router.run(cmd.format("r2"))
logger.info("result: " + output)

router = tgen.gears['r1']
bridge_id = '101'
router = tgen.gears["r1"]
bridge_id = "101"
for cmd in cmds_r1_netns_method3:
logger.info('cmd to r1: '+cmd.format('r1', bridge_id));
output = router.run(cmd.format('r1', bridge_id))
logger.info('result: '+output);
router = tgen.gears['r1']
logger.info("cmd to r1: " + cmd.format("r1", bridge_id))
output = router.run(cmd.format("r1", bridge_id))
logger.info("result: " + output)
router = tgen.gears["r1"]

for rname, router in router_list.items():
if rname == 'r1':
if rname == "r1":
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, '{}/zebra.conf'.format(rname)),
'--vrfwnetns -o vrf0'
os.path.join(CWD, "{}/zebra.conf".format(rname)),
"--vrfwnetns -o vrf0",
)
else:
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, '{}/zebra.conf'.format(rname))
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
TopoRouter.RD_BGP,
os.path.join(CWD, '{}/bgpd.conf'.format(rname))
TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)

# Initialize all routers.
tgen.start_router()


def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
cmds_rx_netns = ['ip netns del {}-vrf-101']
cmds_rx_netns = ["ip netns del {}-vrf-101"]

router = tgen.gears['r1']
router = tgen.gears["r1"]
for cmd in cmds_rx_netns:
logger.info('cmd to r1: '+cmd.format('r1'));
output = router.run(cmd.format('r1'))
logger.info("cmd to r1: " + cmd.format("r1"))
output = router.run(cmd.format("r1"))
tgen.stop_topology()


@@ -183,52 +196,59 @@ def test_protocols_convergence():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
topotest.sleep(4, 'waiting 4 seconds for bgp convergence')
topotest.sleep(4, "waiting 4 seconds for bgp convergence")
# Check IPv4/IPv6 routing tables.
output = tgen.gears['r1'].vtysh_cmd('show bgp l2vpn evpn', isjson=False)
logger.info('==== result from show bgp l2vpn evpn')
output = tgen.gears["r1"].vtysh_cmd("show bgp l2vpn evpn", isjson=False)
logger.info("==== result from show bgp l2vpn evpn")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show bgp l2vpn evpn route detail', isjson=False)
logger.info('==== result from show bgp l2vpn evpn route detail')
output = tgen.gears["r1"].vtysh_cmd(
"show bgp l2vpn evpn route detail", isjson=False
)
logger.info("==== result from show bgp l2vpn evpn route detail")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show bgp vrf r1-vrf-101 ipv4', isjson=False)
logger.info('==== result from show bgp vrf r1-vrf-101 ipv4')
output = tgen.gears["r1"].vtysh_cmd("show bgp vrf r1-vrf-101 ipv4", isjson=False)
logger.info("==== result from show bgp vrf r1-vrf-101 ipv4")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show bgp vrf r1-vrf-101', isjson=False)
logger.info('==== result from show bgp vrf r1-vrf-101 ')
output = tgen.gears["r1"].vtysh_cmd("show bgp vrf r1-vrf-101", isjson=False)
logger.info("==== result from show bgp vrf r1-vrf-101 ")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show ip route vrf r1-vrf-101', isjson=False)
logger.info('==== result from show ip route vrf r1-vrf-101')
output = tgen.gears["r1"].vtysh_cmd("show ip route vrf r1-vrf-101", isjson=False)
logger.info("==== result from show ip route vrf r1-vrf-101")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show evpn vni detail', isjson=False)
logger.info('==== result from show evpn vni detail')
output = tgen.gears["r1"].vtysh_cmd("show evpn vni detail", isjson=False)
logger.info("==== result from show evpn vni detail")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show evpn next-hops vni all', isjson=False)
logger.info('==== result from show evpn next-hops vni all')
output = tgen.gears["r1"].vtysh_cmd("show evpn next-hops vni all", isjson=False)
logger.info("==== result from show evpn next-hops vni all")
logger.info(output)
output = tgen.gears['r1'].vtysh_cmd('show evpn rmac vni all', isjson=False)
logger.info('==== result from show evpn next-hops vni all')
output = tgen.gears["r1"].vtysh_cmd("show evpn rmac vni all", isjson=False)
logger.info("==== result from show evpn next-hops vni all")
logger.info(output)
# Check IPv4 and IPv6 connectivity between r1 and r2 ( routing vxlan evpn)
pingrouter = tgen.gears['r1']
logger.info('Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)')
output = pingrouter.run('ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000')
pingrouter = tgen.gears["r1"]
logger.info(
"Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)"
)
output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000")
logger.info(output)
if '1000 packets transmitted, 1000 received' not in output:
assertmsg = 'expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok'
if "1000 packets transmitted, 1000 received" not in output:
assertmsg = (
"expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok"
)
assert 0, assertmsg
else:
logger.info('Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK')
logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK")


def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip('Memory leak test/report is disabled')
pytest.skip("Memory leak test/report is disabled")

tgen.report_memory_leaks()


if __name__ == '__main__':
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
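The shell command lists in the setup above rely on ordinary str.format placeholders: `{}`/`{0}` is filled with the router name and `{1}` with the bridge/VNI id, which is how one template list drives several routers. A self-contained illustration of the same slots used by cmds_r1_netns_method3:

    # the positional placeholders are plain str.format slots
    template = "ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up"
    print(template.format("r1", "101"))
    # prints: ip netns exec r1-vrf-101 ip link set vxlan-101 up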
@@ -188,11 +188,15 @@ def test_bgp_shutdown():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65000\" -c \"bgp shutdown message ABCDabcd\"')
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" -c "bgp shutdown message ABCDabcd"'
)

# Check BGP Summary on local and remote routers
for rtrNum in [1, 2, 4]:
logger.info("Checking BGP Summary after shutdown of R1 BGP on router r{}".format(rtrNum))
logger.info(
"Checking BGP Summary after shutdown of R1 BGP on router r{}".format(rtrNum)
)

router = tgen.gears["r{}".format(rtrNum)]
reffile = os.path.join(CWD, "r{}/bgp_shutdown_summary.json".format(rtrNum))
@@ -202,7 +206,9 @@ def test_bgp_shutdown():
topotest.router_json_cmp, router, "show ip bgp summary json", expected
)
_, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format(rtrNum)
assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format(
rtrNum
)
assert res is None, assertmsg


@@ -218,18 +224,21 @@ def test_bgp_shutdown_message():
for rtrNum in [2, 4]:
logger.info("Checking BGP shutdown received on router r{}".format(rtrNum))

shut_message = tgen.net['r{}'.format(rtrNum)].cmd(
'tail bgpd.log | grep "NOTIFICATION.*Cease/Administratively Shutdown"')
shut_message = tgen.net["r{}".format(rtrNum)].cmd(
'tail bgpd.log | grep "NOTIFICATION.*Cease/Administratively Shutdown"'
)
assertmsg = "BGP shutdown message not received on router R{}".format(rtrNum)
assert shut_message != '', assertmsg
assert shut_message != "", assertmsg

m = re.search('.*([0-9]+ bytes[ 0-9a-fA-F]+)', shut_message)
m = re.search(".*([0-9]+ bytes[ 0-9a-fA-F]+)", shut_message)
if m:
found = m.group(1)
else:
found = ''
assertmsg = "Incorrect BGP shutdown message received on router R{}".format(rtrNum)
assert found == '8 bytes 41 42 43 44 61 62 63 64', assertmsg
found = ""
assertmsg = "Incorrect BGP shutdown message received on router R{}".format(
rtrNum
)
assert found == "8 bytes 41 42 43 44 61 62 63 64", assertmsg

# tgen.mininet_cli()

@@ -243,11 +252,15 @@ def test_bgp_no_shutdown():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65000\" -c \"no bgp shutdown\"')
tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no bgp shutdown"')

# Check BGP Summary on local and remote routers
for rtrNum in [1, 2, 4]:
logger.info("Checking BGP Summary after removing bgp shutdown on router r1 on router r{}".format(rtrNum))
logger.info(
"Checking BGP Summary after removing bgp shutdown on router r1 on router r{}".format(
rtrNum
)
)

router = tgen.gears["r{}".format(rtrNum)]
reffile = os.path.join(CWD, "r{}/bgp_summary.json".format(rtrNum))
@@ -257,7 +270,9 @@ def test_bgp_no_shutdown():
topotest.router_json_cmp, router, "show ip bgp summary json", expected
)
_, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format(rtrNum)
assertmsg = "BGP sessions on router R{} are in incorrect state (not down as expected?)".format(
rtrNum
)
assert res is None, assertmsg


@@ -303,31 +318,43 @@ def test_bgp_metric_config():
# set metric +12
# !

tgen.net['r1'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+
'-c "address-family ipv4 unicast" '+
'-c "neighbor 192.168.0.2 route-map addmetric-in in" '+
'-c "neighbor 192.168.0.2 route-map addmetric-out out" '+
'-c "neighbor 192.168.101.2 route-map setmetric-in in" '+
'-c "neighbor 192.168.101.2 route-map setmetric-out out" ')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "ip prefix-list net1 seq 10 permit 192.168.101.0/24" '+
'-c "ip prefix-list net2 seq 20 permit 192.168.1.0/24"')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "route-map setmetric-in permit 10" '+
'-c "match ip address prefix-list net1" '+
'-c "set metric 111" '+
'-c "route-map setmetric-in permit 20"')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "route-map setmetric-out permit 10" '+
'-c "match ip address prefix-list net2" '+
'-c "set metric 1011" '+
'-c "route-map setmetric-out permit 20"')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "route-map addmetric-in permit 10" '+
'-c "set metric +11"')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "route-map addmetric-out permit 10" '+
'-c "set metric +12"')
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" '
+ '-c "address-family ipv4 unicast" '
+ '-c "neighbor 192.168.0.2 route-map addmetric-in in" '
+ '-c "neighbor 192.168.0.2 route-map addmetric-out out" '
+ '-c "neighbor 192.168.101.2 route-map setmetric-in in" '
+ '-c "neighbor 192.168.101.2 route-map setmetric-out out" '
)
tgen.net["r1"].cmd(
'vtysh -c "conf t" '
+ '-c "ip prefix-list net1 seq 10 permit 192.168.101.0/24" '
+ '-c "ip prefix-list net2 seq 20 permit 192.168.1.0/24"'
)
tgen.net["r1"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map setmetric-in permit 10" '
+ '-c "match ip address prefix-list net1" '
+ '-c "set metric 111" '
+ '-c "route-map setmetric-in permit 20"'
)
tgen.net["r1"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map setmetric-out permit 10" '
+ '-c "match ip address prefix-list net2" '
+ '-c "set metric 1011" '
+ '-c "route-map setmetric-out permit 20"'
)
tgen.net["r1"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map addmetric-in permit 10" '
+ '-c "set metric +11"'
)
tgen.net["r1"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map addmetric-out permit 10" '
+ '-c "set metric +12"'
)

# # Adding the following configuration to r2:
# router bgp 65000
@@ -360,50 +387,72 @@ def test_bgp_metric_config():
# set metric -23
# !

tgen.net['r2'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+
'-c "address-family ipv4 unicast" '+
'-c "neighbor 192.168.0.1 route-map subtractmetric-in in" '+
'-c "neighbor 192.168.0.1 route-map subtractmetric-out out" '+
'-c "neighbor 192.168.201.2 route-map setmetric-in in" ' +
'-c "neighbor 192.168.201.2 route-map setmetric-out out" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "ip prefix-list net1 seq 10 permit 192.168.201.0/24" '+
'-c "ip prefix-list net2 seq 20 permit 192.168.2.0/24" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "route-map setmetric-in permit 10" '+
'-c "match ip address prefix-list net1" '+
'-c "set metric 222" '+
'-c "route-map setmetric-in permit 20"')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "route-map setmetric-out permit 10" '+
'-c "match ip address prefix-list net2" '+
'-c "set metric 2022" '+
'-c "route-map setmetric-out permit 20"')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "route-map subtractmetric-in permit 10" '+
'-c "set metric -22"')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "route-map subtractmetric-out permit 10" '+
'-c "set metric -23"')
tgen.net["r2"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" '
+ '-c "address-family ipv4 unicast" '
+ '-c "neighbor 192.168.0.1 route-map subtractmetric-in in" '
+ '-c "neighbor 192.168.0.1 route-map subtractmetric-out out" '
+ '-c "neighbor 192.168.201.2 route-map setmetric-in in" '
+ '-c "neighbor 192.168.201.2 route-map setmetric-out out" '
)
tgen.net["r2"].cmd(
'vtysh -c "conf t" '
+ '-c "ip prefix-list net1 seq 10 permit 192.168.201.0/24" '
+ '-c "ip prefix-list net2 seq 20 permit 192.168.2.0/24" '
)
tgen.net["r2"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map setmetric-in permit 10" '
+ '-c "match ip address prefix-list net1" '
+ '-c "set metric 222" '
+ '-c "route-map setmetric-in permit 20"'
)
tgen.net["r2"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map setmetric-out permit 10" '
+ '-c "match ip address prefix-list net2" '
+ '-c "set metric 2022" '
+ '-c "route-map setmetric-out permit 20"'
)
tgen.net["r2"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map subtractmetric-in permit 10" '
+ '-c "set metric -22"'
)
tgen.net["r2"].cmd(
'vtysh -c "conf t" '
+ '-c "route-map subtractmetric-out permit 10" '
+ '-c "set metric -23"'
)

# Clear IN the bgp neighbors to make sure the route-maps are applied
tgen.net['r1'].cmd('vtysh -c "clear ip bgp 192.168.0.2 in" '+
'-c "clear ip bgp 192.168.101.2 in"')
tgen.net['r2'].cmd('vtysh -c "clear ip bgp 192.168.0.1 in" '+
'-c "clear ip bgp 192.168.201.2 in"')
tgen.net["r1"].cmd(
'vtysh -c "clear ip bgp 192.168.0.2 in" ' + '-c "clear ip bgp 192.168.101.2 in"'
)
tgen.net["r2"].cmd(
'vtysh -c "clear ip bgp 192.168.0.1 in" ' + '-c "clear ip bgp 192.168.201.2 in"'
)

# tgen.mininet_cli()

# Checking BGP config - should show the bgp metric settings in the route-maps
logger.info("Checking BGP configuration for correct 'set metric' values")

setmetric111 = tgen.net['r1'].cmd('vtysh -c "show running" | grep "^ set metric 111"').rstrip()
assertmsg = "'set metric 111' configuration applied to R1, but not visible in configuration"
assert setmetric111 == ' set metric 111', assertmsg
setmetric111 = (
tgen.net["r1"].cmd('vtysh -c "show running" | grep "^ set metric 111"').rstrip()
)
assertmsg = (
"'set metric 111' configuration applied to R1, but not visible in configuration"
)
assert setmetric111 == " set metric 111", assertmsg

setmetric222 = tgen.net['r2'].cmd('vtysh -c "show running" | grep "^ set metric 222"').rstrip()
assertmsg = "'set metric 222' configuration applied to R2, but not visible in configuration"
assert setmetric222 == ' set metric 222', assertmsg
setmetric222 = (
tgen.net["r2"].cmd('vtysh -c "show running" | grep "^ set metric 222"').rstrip()
)
assertmsg = (
"'set metric 222' configuration applied to R2, but not visible in configuration"
)
assert setmetric222 == " set metric 222", assertmsg


def test_bgp_metric_add_config():
@@ -417,9 +466,13 @@ def test_bgp_metric_add_config():

logger.info("Checking BGP configuration for correct 'set metric' ADD value")

setmetricP11 = tgen.net['r1'].cmd('vtysh -c "show running" | grep "^ set metric +11"').rstrip()
assertmsg = "'set metric +11' configuration applied to R1, but not visible in configuration"
assert setmetricP11 == ' set metric +11', assertmsg
setmetricP11 = (
tgen.net["r1"].cmd('vtysh -c "show running" | grep "^ set metric +11"').rstrip()
)
assertmsg = (
"'set metric +11' configuration applied to R1, but not visible in configuration"
)
assert setmetricP11 == " set metric +11", assertmsg


def test_bgp_metric_subtract_config():
@@ -433,9 +486,13 @@ def test_bgp_metric_subtract_config():

logger.info("Checking BGP configuration for correct 'set metric' SUBTRACT value")

setmetricM22 = tgen.net['r2'].cmd('vtysh -c "show running" | grep "^ set metric -22"').rstrip()
assertmsg = "'set metric -22' configuration applied to R2, but not visible in configuration"
assert setmetricM22 == ' set metric -22', assertmsg
setmetricM22 = (
tgen.net["r2"].cmd('vtysh -c "show running" | grep "^ set metric -22"').rstrip()
)
assertmsg = (
"'set metric -22' configuration applied to R2, but not visible in configuration"
)
assert setmetricM22 == " set metric -22", assertmsg


def test_bgp_set_metric():
@@ -478,47 +535,49 @@ def test_bgp_remove_metric_rmaps():

# Remove metric route-maps and relevant configuration

tgen.net['r1'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+
'-c "address-family ipv4 unicast" '+
'-c "no neighbor 192.168.0.2 route-map addmetric-in in" '+
'-c "no neighbor 192.168.0.2 route-map addmetric-out out" '+
'-c "no neighbor 192.168.101.2 route-map setmetric-in in" '+
'-c "no neighbor 192.168.101.2 route-map setmetric-out out" ')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "no ip prefix-list net1" '+
'-c "no ip prefix-list net2"')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "no route-map setmetric-in" ')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "no route-map setmetric-out" ')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "no route-map addmetric-in" ')
tgen.net['r1'].cmd('vtysh -c "conf t" '+
'-c "no route-map addmetric-out" ')
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" '
+ '-c "address-family ipv4 unicast" '
+ '-c "no neighbor 192.168.0.2 route-map addmetric-in in" '
+ '-c "no neighbor 192.168.0.2 route-map addmetric-out out" '
+ '-c "no neighbor 192.168.101.2 route-map setmetric-in in" '
+ '-c "no neighbor 192.168.101.2 route-map setmetric-out out" '
)
tgen.net["r1"].cmd(
'vtysh -c "conf t" '
+ '-c "no ip prefix-list net1" '
+ '-c "no ip prefix-list net2"'
)
tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-in" ')
tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-out" ')
tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-in" ')
tgen.net["r1"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-out" ')

tgen.net['r2'].cmd('vtysh -c "conf t" -c "router bgp 65000" '+
'-c "address-family ipv4 unicast" '+
'-c "no neighbor 192.168.0.1 route-map subtractmetric-in in" '+
'-c "no neighbor 192.168.0.1 route-map subtractmetric-out out" '+
'-c "no neighbor 192.168.201.2 route-map setmetric-in in" ' +
'-c "no neighbor 192.168.201.2 route-map setmetric-out out" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "no ip prefix-list net1" '+
'-c "no ip prefix-list net2" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "no route-map setmetric-in" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "no route-map setmetric-out" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "no route-map addmetric-in" ')
tgen.net['r2'].cmd('vtysh -c "conf t" '+
'-c "no route-map addmetric-out" ')
tgen.net["r2"].cmd(
'vtysh -c "conf t" -c "router bgp 65000" '
+ '-c "address-family ipv4 unicast" '
+ '-c "no neighbor 192.168.0.1 route-map subtractmetric-in in" '
+ '-c "no neighbor 192.168.0.1 route-map subtractmetric-out out" '
+ '-c "no neighbor 192.168.201.2 route-map setmetric-in in" '
+ '-c "no neighbor 192.168.201.2 route-map setmetric-out out" '
)
tgen.net["r2"].cmd(
'vtysh -c "conf t" '
+ '-c "no ip prefix-list net1" '
+ '-c "no ip prefix-list net2" '
)
tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-in" ')
tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map setmetric-out" ')
tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-in" ')
tgen.net["r2"].cmd('vtysh -c "conf t" ' + '-c "no route-map addmetric-out" ')

# Clear IN the bgp neighbors to make sure the route-maps are applied
tgen.net['r1'].cmd('vtysh -c "clear ip bgp 192.168.0.2 in" '+
'-c "clear ip bgp 192.168.101.2 in"')
tgen.net['r2'].cmd('vtysh -c "clear ip bgp 192.168.0.1 in" '+
'-c "clear ip bgp 192.168.201.2 in"')
tgen.net["r1"].cmd(
'vtysh -c "clear ip bgp 192.168.0.2 in" ' + '-c "clear ip bgp 192.168.101.2 in"'
)
tgen.net["r2"].cmd(
'vtysh -c "clear ip bgp 192.168.0.1 in" ' + '-c "clear ip bgp 192.168.201.2 in"'
)

# tgen.mininet_cli()

@@ -534,7 +593,9 @@ def test_bgp_remove_metric_rmaps():
topotest.router_json_cmp, router, "show ip bgp json", expected
)
_, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
assertmsg = "BGP routes on router r{} are wrong after removing metric route-maps".format(rtrNum)
assertmsg = "BGP routes on router r{} are wrong after removing metric route-maps".format(
rtrNum
)
assert res is None, assertmsg


@@ -549,15 +610,17 @@ def test_bgp_norib():

logger.info("Configuring 'bgp no-rib' on router r1")

tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"bgp no-rib\"')
tgen.net["r1"].cmd('vtysh -c "conf t" -c "bgp no-rib"')

# Checking BGP config - should show the "bgp no-rib" under the router bgp section
logger.info("Checking BGP configuration for 'bgp no-rib'")

norib_cfg = tgen.net['r1'].cmd('vtysh -c "show running bgpd" | grep "^bgp no-rib"').rstrip()
norib_cfg = (
tgen.net["r1"].cmd('vtysh -c "show running bgpd" | grep "^bgp no-rib"').rstrip()
)

assertmsg = "'bgp no-rib' configuration applied, but not visible in configuration"
assert norib_cfg == 'bgp no-rib', assertmsg
assert norib_cfg == "bgp no-rib", assertmsg


def test_bgp_norib_routes():
@@ -585,7 +648,11 @@ def test_bgp_norib_routes():

# Check BGP Summary on local and remote routers
for rtrNum in [1, 2, 4]:
logger.info("Checking BGP Summary after 'bgp no-rib' on router r1 on router r{}".format(rtrNum))
logger.info(
"Checking BGP Summary after 'bgp no-rib' on router r1 on router r{}".format(
rtrNum
)
)

router = tgen.gears["r{}".format(rtrNum)]
reffile = os.path.join(CWD, "r{}/bgp_summary.json".format(rtrNum))
@@ -595,7 +662,9 @@ def test_bgp_norib_routes():
topotest.router_json_cmp, router, "show ip bgp summary json", expected
)
_, res = topotest.run_and_expect(test_func, None, count=30, wait=2)
assertmsg = "BGP sessions on router R{} has incorrect routes after adding 'bgp no-rib on r1'".format(rtrNum)
assertmsg = "BGP sessions on router R{} has incorrect routes after adding 'bgp no-rib on r1'".format(
rtrNum
)
assert res is None, assertmsg

# tgen.mininet_cli()
@@ -612,15 +681,21 @@ def test_bgp_disable_norib():

logger.info("Configuring 'no bgp no-rib' on router r1")

tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"no bgp no-rib\"')
tgen.net["r1"].cmd('vtysh -c "conf t" -c "no bgp no-rib"')

# Checking BGP config - should show the "bgp no-rib" under the router bgp section
logger.info("Checking BGP configuration for 'bgp no-rib'")

norib_cfg = tgen.net['r1'].cmd('vtysh -c "show running bgpd" | grep "^ bgp no-rib"').rstrip()
norib_cfg = (
tgen.net["r1"]
.cmd('vtysh -c "show running bgpd" | grep "^ bgp no-rib"')
.rstrip()
)

assertmsg = "'no bgp no-rib'configuration applied, but still visible in configuration"
assert norib_cfg == '', assertmsg
assertmsg = (
"'no bgp no-rib'configuration applied, but still visible in configuration"
)
assert norib_cfg == "", assertmsg


def test_bgp_disable_norib_routes():
@@ -648,7 +723,11 @@ def test_bgp_disable_norib_routes():

# Check BGP Summary on local and remote routers
for rtrNum in [1, 2, 4]:
logger.info("Checking BGP Summary after removing the 'bgp no-rib' on router r1 on router r{}".format(rtrNum))
logger.info(
"Checking BGP Summary after removing the 'bgp no-rib' on router r1 on router r{}".format(
rtrNum
)
)

router = tgen.gears["r{}".format(rtrNum)]
reffile = os.path.join(CWD, "r{}/bgp_summary.json".format(rtrNum))
@@ -658,13 +737,14 @@ def test_bgp_disable_norib_routes():
topotest.router_json_cmp, router, "show ip bgp summary json", expected
)
_, res = topotest.run_and_expect(test_func, None, count=30, wait=2)
assertmsg = "BGP sessions on router R{} has incorrect routes after removing 'bgp no-rib on r1'".format(rtrNum)
assertmsg = "BGP sessions on router R{} has incorrect routes after removing 'bgp no-rib on r1'".format(
rtrNum
)
assert res is None, assertmsg

# tgen.mininet_cli()



if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
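The configuration in the file above is pushed through vtysh, where each `-c` flag queues one line of CLI input; black merely re-wraps the long chains of concatenated string literals. A hypothetical helper showing the same idiom in compact form (vtysh_config is not part of the test library, only an illustration):

    def vtysh_config(node, *lines):
        # build 'vtysh -c "conf t" -c "<line>" ...' exactly like the
        # concatenated literals in test_bgp_metric_config above
        args = " ".join('-c "{}"'.format(l) for l in ("conf t",) + lines)
        return node.cmd("vtysh " + args)

    # usage sketch:
    # vtysh_config(tgen.net["r1"], "router bgp 65000", "bgp shutdown message ABCDabcd")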
@@ -200,6 +200,7 @@ def test_bgp_flowspec():
else:
logger.info("Check BGP FS entry for 3::3 with redirect IP OK")


if __name__ == "__main__":

args = ["-s"] + sys.argv[1:]
@@ -135,7 +135,7 @@ from lib.common_config import (
kill_mininet_routers_process,
get_frr_ipv6_linklocal,
create_route_maps,
required_linux_kernel_version
required_linux_kernel_version,
)

# Reading the data from JSON File for topology and configuration creation
@@ -188,7 +188,7 @@ def setup_module(mod):
global ADDR_TYPES

# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")

@@ -135,7 +135,7 @@ from lib.common_config import (
kill_mininet_routers_process,
get_frr_ipv6_linklocal,
create_route_maps,
required_linux_kernel_version
required_linux_kernel_version,
)

# Reading the data from JSON File for topology and configuration creation
@@ -185,7 +185,7 @@ def setup_module(mod):
"""

# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")
@ -99,12 +99,14 @@ class TemplateTopo(Topo):
|
||||
switch.add_link(tgen.gears["r2"])
|
||||
switch.add_link(tgen.gears["r5"])
|
||||
|
||||
|
||||
def _run_cmd_and_check(router, cmd, results_file, retries=100, intvl=0.5):
|
||||
json_file = "{}/{}".format(CWD, results_file)
|
||||
expected = json.loads(open(json_file).read())
|
||||
test_func = partial(topotest.router_json_cmp, router, cmd, expected)
|
||||
return topotest.run_and_expect(test_func, None, retries, intvl)
|
||||
|
||||
|
||||
def setup_module(mod):
|
||||
tgen = Topogen(TemplateTopo, mod.__name__)
|
||||
tgen.start_topology()
|
||||
@ -134,12 +136,14 @@ def setup_module(mod):
|
||||
tgen.start_router()
|
||||
|
||||
# Basic peering test to see if things are ok
|
||||
_, result = _run_cmd_and_check(r2, 'show ip bgp summary json', 'r2/bgp_sum_1.json')
|
||||
assertmsg = 'R2: Basic sanity test after init failed -- global peerings not up'
|
||||
_, result = _run_cmd_and_check(r2, "show ip bgp summary json", "r2/bgp_sum_1.json")
|
||||
assertmsg = "R2: Basic sanity test after init failed -- global peerings not up"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r2, 'show ip bgp vrf vrf1 summary json', 'r2/bgp_sum_2.json')
|
||||
assertmsg = 'R2: Basic sanity test after init failed -- VRF peerings not up'
|
||||
_, result = _run_cmd_and_check(
|
||||
r2, "show ip bgp vrf vrf1 summary json", "r2/bgp_sum_2.json"
|
||||
)
|
||||
assertmsg = "R2: Basic sanity test after init failed -- VRF peerings not up"
|
||||
assert result is None, assertmsg
|
||||
|
||||
|
||||
@ -160,27 +164,33 @@ def test_bgp_gshut():
|
||||
r4 = tgen.gears["r4"]
|
||||
r5 = tgen.gears["r5"]
|
||||
|
||||
|
||||
# Verify initial route states
|
||||
logger.info('\nVerify initial route states')
|
||||
logger.info("\nVerify initial route states")
|
||||
|
||||
_, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json')
|
||||
assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json"
|
||||
)
|
||||
assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json')
|
||||
assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json"
|
||||
)
|
||||
assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_1.json')
|
||||
assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_1.json"
|
||||
)
|
||||
assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
logger.info('\nInitial route states are as expected')
|
||||
logger.info("\nInitial route states are as expected")
|
||||
|
||||
|
||||
#"Test #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers"
|
||||
logger.info('\nTest #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers')
|
||||
# "Test #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers"
|
||||
logger.info(
|
||||
"\nTest #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers"
|
||||
)
|
||||
|
||||
r2.vtysh_cmd(
|
||||
"""
|
||||
@ -191,23 +201,32 @@ def test_bgp_gshut():
|
||||
|
||||
# R1, R3 and R5 should see routes from R2 with GSHUT. In addition,
|
||||
# R1 should see LOCAL_PREF of 0
|
||||
_, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_2.json')
|
||||
assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_2.json"
|
||||
)
|
||||
assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_2.json')
|
||||
assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_2.json"
|
||||
)
|
||||
assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_2.json')
|
||||
assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_2.json"
|
||||
)
|
||||
assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
logger.info('\nTest #1: Successful, routes have GSHUT and/or LPREF of 0 as expected')
|
||||
logger.info(
|
||||
"\nTest #1: Successful, routes have GSHUT and/or LPREF of 0 as expected"
|
||||
)
|
||||
|
||||
|
||||
#"Test #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers"
|
||||
logger.info('\nTest #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers')
|
||||
# "Test #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers"
|
||||
logger.info(
|
||||
"\nTest #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers"
|
||||
)
|
||||
|
||||
r2.vtysh_cmd(
|
||||
"""
|
||||
@ -217,23 +236,32 @@ def test_bgp_gshut():
|
||||
)
|
||||
|
||||
# R1, R3 and R5 should see routes from R2 with their original attributes
|
||||
_, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json')
|
||||
assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json"
|
||||
)
|
||||
assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json')
|
||||
assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json"
|
||||
)
|
||||
assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
_, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_1.json')
|
||||
assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params'
|
||||
_, result = _run_cmd_and_check(
|
||||
r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_1.json"
|
||||
)
|
||||
assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params"
|
||||
assert result is None, assertmsg
|
||||
|
||||
logger.info('\nTest #2: Successful, routes have their original attributes with default LPREF and without GSHUT')
|
||||
logger.info(
|
||||
"\nTest #2: Successful, routes have their original attributes with default LPREF and without GSHUT"
|
||||
)

#"Test #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers"
logger.info('\nTest #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers')
# "Test #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers"
logger.info(
"\nTest #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers"
)

r2.vtysh_cmd(
"""
@ -244,24 +272,31 @@ def test_bgp_gshut():
)

# R1 and R3 should see no change to their routes
_, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json')
assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params'
_, result = _run_cmd_and_check(
r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json"
)
assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params"
assert result is None, assertmsg

_, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json')
assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params'
_, result = _run_cmd_and_check(
r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json"
)
assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params"
assert result is None, assertmsg

# R5 should see routes from R2 with GSHUT.
_, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_2.json')
assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params'
_, result = _run_cmd_and_check(
r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_2.json"
)
assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params"
assert result is None, assertmsg

logger.info('\nTest #3: Successful, only VRF peers like R5 see routes with GSHUT')
logger.info("\nTest #3: Successful, only VRF peers like R5 see routes with GSHUT")


#"Test #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1"
logger.info('\nTest #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1')
# "Test #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1"
logger.info(
"\nTest #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1"
)

ret = r2.vtysh_cmd(
"""
@ -271,14 +306,19 @@ def test_bgp_gshut():
)

# This should fail
assertmsg = 'R2: BGP-wide graceful-shutdown config not rejected even though it is enabled in VRF1'
assert re.search("global graceful-shutdown not permitted", ret) is not None, assertmsg
assertmsg = "R2: BGP-wide graceful-shutdown config not rejected even though it is enabled in VRF1"
assert (
re.search("global graceful-shutdown not permitted", ret) is not None
), assertmsg

logger.info('\nTest #4: Successful, BGP-wide graceful-shutdown rejected as it is enabled in VRF')
logger.info(
"\nTest #4: Successful, BGP-wide graceful-shutdown rejected as it is enabled in VRF"
)


#"Test #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers"
logger.info('\nTest #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers')
# "Test #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers"
logger.info(
"\nTest #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers"
)

r2.vtysh_cmd(
"""
@ -289,25 +329,32 @@ def test_bgp_gshut():
)

# R1 and R3 should see no change to their routes
_, result = _run_cmd_and_check(r1, 'show ip bgp 13.1.1.1/32 json', 'r1/bgp_route_1.json')
assertmsg = 'R1: Route 13.1.1.1/32 not present or has unexpected params'
_, result = _run_cmd_and_check(
r1, "show ip bgp 13.1.1.1/32 json", "r1/bgp_route_1.json"
)
assertmsg = "R1: Route 13.1.1.1/32 not present or has unexpected params"
assert result is None, assertmsg

_, result = _run_cmd_and_check(r3, 'show ip bgp 11.1.1.1/32 json', 'r3/bgp_route_1.json')
assertmsg = 'R3: Route 11.1.1.1/32 not present or has unexpected params'
_, result = _run_cmd_and_check(
r3, "show ip bgp 11.1.1.1/32 json", "r3/bgp_route_1.json"
)
assertmsg = "R3: Route 11.1.1.1/32 not present or has unexpected params"
assert result is None, assertmsg

# R5 should see routes from R2 with original attributes.
_, result = _run_cmd_and_check(r5, 'show ip bgp 14.1.1.1/32 json', 'r5/bgp_route_1.json')
assertmsg = 'R5: Route 14.1.1.1/32 not present or has unexpected params'
_, result = _run_cmd_and_check(
r5, "show ip bgp 14.1.1.1/32 json", "r5/bgp_route_1.json"
)
assertmsg = "R5: Route 14.1.1.1/32 not present or has unexpected params"
assert result is None, assertmsg

logger.info(
"\nTest #5: Successful, routes have their original attributes with default LPREF and without GSHUT"
)

logger.info('\nTest #5: Successful, routes have their original attributes with default LPREF and without GSHUT')
# tgen.mininet_cli()


#tgen.mininet_cli()

if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
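The transformation in every hunk of this file is mechanical and reproducible: black normalizes string quotes to double quotes and rewraps calls that exceed its 88-column default. It can be reapplied programmatically; a minimal sketch, assuming black is installed and with the file path merely illustrative:

import subprocess

# Re-run black over one test module (path illustrative).
subprocess.run(
    ["black", "tests/topotests/bgp_gshut/test_bgp_gshut.py"], check=True
)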

@ -306,8 +306,13 @@ want_r1_remote_cust1_routes = [
{"p": "99.0.0.4/32", "n": "4.4.4.4"},
]
bgpribRequireUnicastRoutes(
"r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf (2)", want_r1_remote_cust1_routes
, debug=False)
"r1",
"ipv4",
"r1-cust1",
"Customer 1 routes in r1 vrf (2)",
want_r1_remote_cust1_routes,
debug=False,
)

want_r3_remote_cust1_routes = [
{"p": "5.1.0.0/24", "n": "1.1.1.1", "bp": True},
@ -329,8 +334,13 @@ want_r3_remote_cust1_routes = [
{"p": "99.0.0.4/32", "n": "4.4.4.4", "bp": True},
]
bgpribRequireUnicastRoutes(
"r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf (2)", want_r3_remote_cust1_routes
, debug=False)
"r3",
"ipv4",
"r3-cust1",
"Customer 1 routes in r3 vrf (2)",
want_r3_remote_cust1_routes,
debug=False,
)

want_r4_remote_cust1_routes = [
{"p": "5.1.0.0/24", "n": "1.1.1.1", "bp": True},
@ -351,8 +361,13 @@ want_r4_remote_cust1_routes = [
{"p": "99.0.0.4/32", "n": "192.168.2.2", "bp": True},
]
bgpribRequireUnicastRoutes(
"r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf (2)", want_r4_remote_cust1_routes
, debug=False)
"r4",
"ipv4",
"r4-cust1",
"Customer 1 routes in r4 vrf (2)",
want_r4_remote_cust1_routes,
debug=False,
)

want_r4_remote_cust2_routes = [
{"p": "5.1.0.0/24", "n": "1.1.1.1", "bp": True},
@ -373,8 +388,13 @@ want_r4_remote_cust2_routes = [
{"p": "99.0.0.4/32", "n": "192.168.2.2", "bp": True},
]
bgpribRequireUnicastRoutes(
"r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf (2)", want_r4_remote_cust2_routes
, debug=False)
"r4",
"ipv4",
"r4-cust2",
"Customer 2 routes in r4 vrf (2)",
want_r4_remote_cust2_routes,
debug=False,
)
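For context, `bgpribRequireUnicastRoutes` belongs to the lutil-based helpers of these L3VPN tests: each wanted entry pins a prefix "p", its next hop "n", and optionally whether it must be the best path "bp". A simplified sketch of the containment check it performs, with the JSON key names assumed rather than taken from the helper itself:

# Sketch: verify each {"p": prefix, "n": nexthop, "bp": bestpath} entry
# is present in one router/VRF's BGP RIB JSON (key names assumed).
def require_unicast_routes(rib_json, wanted):
    for want in wanted:
        paths = rib_json.get("routes", {}).get(want["p"], [])
        ok = any(
            any(nh.get("ip") == want["n"] for nh in p.get("nexthops", []))
            and (not want.get("bp") or p.get("bestpath", False))
            for p in paths
        )
        if not ok:
            return False
    return True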


#########################################################################
@ -402,7 +422,9 @@ want = [
{"p": "6.0.1.0/24", "n": "99.0.0.1", "bp": True},
{"p": "6.0.2.0/24", "n": "99.0.0.1", "bp": True},
]
bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes from remote", want, debug=False)
bgpribRequireUnicastRoutes(
"ce1", "ipv4", "", "Cust 1 routes from remote", want, debug=False
)

luCommand(
"ce2",
@ -425,7 +447,9 @@ want = [
{"p": "6.0.1.0/24", "n": "99.0.0.2", "bp": True},
{"p": "6.0.2.0/24", "n": "99.0.0.2", "bp": True},
]
bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 1 routes from remote", want, debug=False)
bgpribRequireUnicastRoutes(
"ce2", "ipv4", "", "Cust 1 routes from remote", want, debug=False
)

# human readable output for debugging
luCommand("r4", 'vtysh -c "show bgp vrf r4-cust1 ipv4 uni"')
@ -453,7 +477,9 @@ want = [
{"p": "6.0.1.0/24", "n": "99.0.0.3", "bp": True},
{"p": "6.0.2.0/24", "n": "99.0.0.3", "bp": True},
]
bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 1 routes from remote", want, debug=False)
bgpribRequireUnicastRoutes(
"ce3", "ipv4", "", "Cust 1 routes from remote", want, debug=False
)

luCommand(
"ce4",
@ -477,58 +503,91 @@ bgpribRequireUnicastRoutes(
"ce4", "ipv4", "ce4-cust2", "Cust 2 routes from remote", want, debug=False
)

#verify details of exported/imported routes
luCommand("ce1",'vtysh -c "show bgp ipv4 uni 6.0.1.0"',
# verify details of exported/imported routes
luCommand(
"ce1",
'vtysh -c "show bgp ipv4 uni 6.0.1.0"',
"1 available.*192.168.1.1.*99.0.0.1.*Community: 0:67.*Extended Community: RT:89:123.*Large Community: 12:34:56",
"pass", "Redundant route 1 details")
luCommand("ce2",'vtysh -c "show bgp ipv4 uni 6.0.1.0"',
"2 available, best .*192.168.1.1.* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" +
".* Origin IGP, metric 98, localpref 123, valid, internal" +
".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56",
".* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2" +
".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56",
"pass", "Redundant route 1 details")
luCommand("ce3",'vtysh -c "show bgp ipv4 uni 6.0.1.0"',
"2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3" +
".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56" +
".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" +
".* Origin IGP, metric 98, localpref 123, valid, internal" +
".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56",
"pass", "Redundant route 1 details")
luCommand("ce4",'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.1.0"',
"2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1" +
".* Origin IGP, metric 98, localpref 123, valid, internal" +
".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56" +
".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4" +
".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56",
"pass", "Redundant route 1 details")
"pass",
"Redundant route 1 details",
)
luCommand(
"ce2",
'vtysh -c "show bgp ipv4 uni 6.0.1.0"',
"2 available, best .*192.168.1.1.* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1"
+ ".* Origin IGP, metric 98, localpref 123, valid, internal"
+ ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56",
".* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2"
+ ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56",
"pass",
"Redundant route 1 details",
)
luCommand(
"ce3",
'vtysh -c "show bgp ipv4 uni 6.0.1.0"',
"2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3"
+ ".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56"
+ ".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1"
+ ".* Origin IGP, metric 98, localpref 123, valid, internal"
+ ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56",
"pass",
"Redundant route 1 details",
)
luCommand(
"ce4",
'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.1.0"',
"2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1"
+ ".* Origin IGP, metric 98, localpref 123, valid, internal"
+ ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:56"
+ ".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4"
+ ".* Origin IGP, metric 200, localpref 50, weight 32768, valid, sourced, local, best .Weight"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:56",
"pass",
"Redundant route 1 details",
)

luCommand("ce1",'vtysh -c "show bgp ipv4 uni 6.0.2.0"',
"1 available, best .*192.168.1.1.* Local.* 99.0.0.1 from 0.0.0.0 .99.0.0.1" +
".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:11",
"pass", "Redundant route 2 details")
luCommand("ce2",'vtysh -c "show bgp ipv4 uni 6.0.2.0"', "1 available, best .*192.168.1.1.* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2" +
".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:12",
"pass", "Redundant route 2 details")
luCommand("ce3",'vtysh -c "show bgp ipv4 uni 6.0.2.0"',
"2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3" +
".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:13" +
".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1" +
".* Origin IGP, metric 100, localpref 100, valid, internal" +
".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:14",
"pass", "Redundant route 2 details")
luCommand("ce4",'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.2.0"',
"2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1" +
".* Origin IGP, metric 100, localpref 100, valid, internal" +
".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:13" +
".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4" +
".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight" +
".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:14",
"pass", "Redundant route 2 details")
#done
luCommand(
"ce1",
'vtysh -c "show bgp ipv4 uni 6.0.2.0"',
"1 available, best .*192.168.1.1.* Local.* 99.0.0.1 from 0.0.0.0 .99.0.0.1"
+ ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:11",
"pass",
"Redundant route 2 details",
)
luCommand(
"ce2",
'vtysh -c "show bgp ipv4 uni 6.0.2.0"',
"1 available, best .*192.168.1.1.* Local.* 99.0.0.2 from 0.0.0.0 .99.0.0.2"
+ ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .First path received"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:12",
"pass",
"Redundant route 2 details",
)
luCommand(
"ce3",
'vtysh -c "show bgp ipv4 uni 6.0.2.0"',
"2 available, best .*192.168.1.1.* Local.* 99.0.0.3 from 0.0.0.0 .99.0.0.3"
+ ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:13"
+ ".* Local.* 192.168.1.1 from 192.168.1.1 .192.168.1.1"
+ ".* Origin IGP, metric 100, localpref 100, valid, internal"
+ ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:14",
"pass",
"Redundant route 2 details",
)
luCommand(
"ce4",
'vtysh -c "show bgp vrf ce4-cust2 ipv4 6.0.2.0"',
"2 available, best .*192.168.2.1.* Local.* 192.168.2.1 from 192.168.2.1 .192.168.2.1"
+ ".* Origin IGP, metric 100, localpref 100, valid, internal"
+ ".* Community: 0:67.* Extended Community: RT:52:100 RT:89:123.* Large Community: 12:34:13"
+ ".* Local.* 99.0.0.4 from 0.0.0.0 .99.0.0.4"
+ ".* Origin IGP, metric 100, localpref 100, weight 32768, valid, sourced, local, best .Weight"
+ ".* Community: 0:67.* Extended Community: RT:89:123.* Large Community: 12:34:14",
"pass",
"Redundant route 2 details",
)
# done
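For context, `luCommand` is the lutil driver behind all of these checks: it runs a shell command on the named node and matches the output against a regular expression, recording a pass or fail result. A rough sketch of that contract, with the lutil bookkeeping elided and the reporting format assumed:

import re

# Sketch of the luCommand(target, command, regexp, op, message) contract.
def lu_command(node, command, regexp, op, message):
    output = node.cmd(command)  # run the command on the named router/host
    found = re.search(regexp, output) is not None
    passed = found if op == "pass" else not found
    print("{}: {}".format("PASS" if passed else "FAIL", message))
    return passed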

@ -67,7 +67,7 @@ from lib.common_config import (
verify_bgp_community,
step,
check_address_types,
required_linux_kernel_version
required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
@ -144,7 +144,7 @@ def setup_module(mod):
* `mod`: module name
"""
# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")
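`required_linux_kernel_version` comes from lib.common_config; conceptually it compares the running kernel release against the requested minimum. A minimal sketch of that kind of gate, assuming platform.release() is an acceptable source for the version string:

import platform

# Sketch: return True when the running kernel is at least min_version.
def kernel_at_least(min_version):
    release = platform.release().split("-")[0]  # e.g. "5.4.0"
    current = tuple(int(x) for x in release.split(".")[:2])
    wanted = tuple(int(x) for x in min_version.split(".")[:2])
    return current >= wanted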

@ -35,7 +35,7 @@ import json

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, '../'))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
@ -63,8 +63,10 @@ this scenario, the servers are also routers as they have to announce
anycast IP (VIP) addresses via BGP.
"""


class BgpLinkBwTopo(Topo):
"Test topology builder"

def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
@ -73,45 +75,46 @@ class BgpLinkBwTopo(Topo):
# and 4 servers
routers = {}
for i in range(1, 11):
routers[i] = tgen.add_router('r{}'.format(i))
routers[i] = tgen.add_router("r{}".format(i))

# Create 13 "switches" - to interconnect the above routers
switches = {}
for i in range(1, 14):
switches[i] = tgen.add_switch('s{}'.format(i))
switches[i] = tgen.add_switch("s{}".format(i))

# Interconnect R1 (super-spine) to R2 and R3 (the two spines)
switches[1].add_link(tgen.gears['r1'])
switches[1].add_link(tgen.gears['r2'])
switches[2].add_link(tgen.gears['r1'])
switches[2].add_link(tgen.gears['r3'])
switches[1].add_link(tgen.gears["r1"])
switches[1].add_link(tgen.gears["r2"])
switches[2].add_link(tgen.gears["r1"])
switches[2].add_link(tgen.gears["r3"])

# Interconnect R2 (spine in pod-1) to R4 and R5 (the associated
# leaf switches)
switches[3].add_link(tgen.gears['r2'])
switches[3].add_link(tgen.gears['r4'])
switches[4].add_link(tgen.gears['r2'])
switches[4].add_link(tgen.gears['r5'])
switches[3].add_link(tgen.gears["r2"])
switches[3].add_link(tgen.gears["r4"])
switches[4].add_link(tgen.gears["r2"])
switches[4].add_link(tgen.gears["r5"])

# Interconnect R3 (spine in pod-2) to R6 (associated leaf)
switches[5].add_link(tgen.gears['r3'])
switches[5].add_link(tgen.gears['r6'])
switches[5].add_link(tgen.gears["r3"])
switches[5].add_link(tgen.gears["r6"])

# Interconnect leaf switches to servers
switches[6].add_link(tgen.gears['r4'])
switches[6].add_link(tgen.gears['r7'])
switches[7].add_link(tgen.gears['r4'])
switches[7].add_link(tgen.gears['r8'])
switches[8].add_link(tgen.gears['r5'])
switches[8].add_link(tgen.gears['r9'])
switches[9].add_link(tgen.gears['r6'])
switches[9].add_link(tgen.gears['r10'])
switches[6].add_link(tgen.gears["r4"])
switches[6].add_link(tgen.gears["r7"])
switches[7].add_link(tgen.gears["r4"])
switches[7].add_link(tgen.gears["r8"])
switches[8].add_link(tgen.gears["r5"])
switches[8].add_link(tgen.gears["r9"])
switches[9].add_link(tgen.gears["r6"])
switches[9].add_link(tgen.gears["r10"])

# Create empty networks for the servers
switches[10].add_link(tgen.gears['r7'])
switches[11].add_link(tgen.gears['r8'])
switches[12].add_link(tgen.gears['r9'])
switches[13].add_link(tgen.gears['r10'])
switches[10].add_link(tgen.gears["r7"])
switches[11].add_link(tgen.gears["r8"])
switches[12].add_link(tgen.gears["r9"])
switches[13].add_link(tgen.gears["r10"])


def setup_module(mod):
"Sets up the pytest environment"
@ -121,395 +124,454 @@ def setup_module(mod):
router_list = tgen.routers()
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, '{}/zebra.conf'.format(rname))
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
TopoRouter.RD_BGP,
os.path.join(CWD, '{}/bgpd.conf'.format(rname))
TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)

# Initialize all routers.
tgen.start_router()

#tgen.mininet_cli()
# tgen.mininet_cli()


def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
tgen.stop_topology()


def test_bgp_linkbw_adv():
"Test #1: Test BGP link-bandwidth advertisement based on number of multipaths"
logger.info('\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths')
logger.info(
"\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths"
)

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r2 = tgen.gears['r2']
r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]

# Configure anycast IP on server r7
logger.info('Configure anycast IP on server r7')
logger.info("Configure anycast IP on server r7")

tgen.net['r7'].cmd('ip addr add 198.10.1.1/32 dev r7-eth1')
tgen.net["r7"].cmd("ip addr add 198.10.1.1/32 dev r7-eth1")

# Check on spine router r2 for link-bw advertisement by leaf router r4
logger.info('Check on spine router r2 for link-bw advertisement by leaf router r4')
logger.info("Check on spine router r2 for link-bw advertisement by leaf router r4")

json_file = '{}/r2/bgp-route-1.json'.format(CWD)
json_file = "{}/r2/bgp-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r2, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on spine router r2'
assertmsg = "JSON output mismatch on spine router r2"
assert result is None, assertmsg
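`topotest.run_and_expect` drives every polling check in these tests: it calls test_func repeatedly, up to count times with wait seconds between attempts, until the return value equals the expected result (None here, since router_json_cmp returns None on a match). A stripped-down sketch of that retry loop, under the assumption that this is the essential behavior:

import time

# Sketch of the retry loop behind topotest.run_and_expect.
def run_and_expect(test_func, expected, count=20, wait=3):
    result = test_func()
    while count > 0 and result != expected:
        time.sleep(wait)
        result = test_func()
        count -= 1
    return (result == expected, result)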

# Check on spine router r2 that default weight is used as there is no multipath
logger.info('Check on spine router r2 that default weight is used as there is no multipath')
logger.info(
"Check on spine router r2 that default weight is used as there is no multipath"
)

json_file = '{}/r2/ip-route-1.json'.format(CWD)
json_file = "{}/r2/ip-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r2, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r2, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on spine router r2'
assertmsg = "JSON output mismatch on spine router r2"
assert result is None, assertmsg

# Check on super-spine router r1 that link-bw has been propagated by spine router r2
logger.info('Check on super-spine router r1 that link-bw has been propagated by spine router r2')
logger.info(
"Check on super-spine router r1 that link-bw has been propagated by spine router r2"
)

json_file = '{}/r1/bgp-route-1.json'.format(CWD)
json_file = "{}/r1/bgp-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg


def test_bgp_cumul_linkbw():
"Test #2: Test cumulative link-bandwidth propagation"
logger.info('\nTest #2: Test cumulative link-bandwidth propagation')
logger.info("\nTest #2: Test cumulative link-bandwidth propagation")

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r2 = tgen.gears['r2']
r4 = tgen.gears['r4']
r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]
r4 = tgen.gears["r4"]

# Configure anycast IP on additional server r8
logger.info('Configure anycast IP on server r8')
logger.info("Configure anycast IP on server r8")

tgen.net['r8'].cmd('ip addr add 198.10.1.1/32 dev r8-eth1')
tgen.net["r8"].cmd("ip addr add 198.10.1.1/32 dev r8-eth1")

# Check multipath on leaf router r4
logger.info('Check multipath on leaf router r4')
logger.info("Check multipath on leaf router r4")

json_file = '{}/r4/bgp-route-1.json'.format(CWD)
json_file = "{}/r4/bgp-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r4, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r4, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on leaf router r4'
assertmsg = "JSON output mismatch on leaf router r4"
assert result is None, assertmsg

# Check regular ECMP is in effect on leaf router r4
logger.info('Check regular ECMP is in effect on leaf router r4')
logger.info("Check regular ECMP is in effect on leaf router r4")

json_file = '{}/r4/ip-route-1.json'.format(CWD)
json_file = "{}/r4/ip-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r4, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r4, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on leaf router r4'
assertmsg = "JSON output mismatch on leaf router r4"
assert result is None, assertmsg

# Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths
logger.info('Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths')
logger.info(
"Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths"
)

json_file = '{}/r2/bgp-route-2.json'.format(CWD)
json_file = "{}/r2/bgp-route-2.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r2, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on spine router r2'
assertmsg = "JSON output mismatch on spine router r2"
assert result is None, assertmsg


def test_weighted_ecmp():
"Test #3: Test weighted ECMP - multipath with next hop weights"
logger.info('\nTest #3: Test weighted ECMP - multipath with next hop weights')
logger.info("\nTest #3: Test weighted ECMP - multipath with next hop weights")

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r2 = tgen.gears['r2']
r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]

# Configure anycast IP on additional server r9
logger.info('Configure anycast IP on server r9')
logger.info("Configure anycast IP on server r9")

tgen.net['r9'].cmd('ip addr add 198.10.1.1/32 dev r9-eth1')
tgen.net["r9"].cmd("ip addr add 198.10.1.1/32 dev r9-eth1")

# Check multipath on spine router r2
logger.info('Check multipath on spine router r2')
json_file = '{}/r2/bgp-route-3.json'.format(CWD)
logger.info("Check multipath on spine router r2")
json_file = "{}/r2/bgp-route-3.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r2, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on spine router r2'
assertmsg = "JSON output mismatch on spine router r2"
assert result is None, assertmsg

# Check weighted ECMP is in effect on the spine router r2
logger.info('Check weighted ECMP is in effect on the spine router r2')
logger.info("Check weighted ECMP is in effect on the spine router r2")

json_file = '{}/r2/ip-route-2.json'.format(CWD)
json_file = "{}/r2/ip-route-2.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r2, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r2, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on spine router r2'
assertmsg = "JSON output mismatch on spine router r2"
assert result is None, assertmsg

# Configure anycast IP on additional server r10
logger.info('Configure anycast IP on server r10')
logger.info("Configure anycast IP on server r10")

tgen.net['r10'].cmd('ip addr add 198.10.1.1/32 dev r10-eth1')
tgen.net["r10"].cmd("ip addr add 198.10.1.1/32 dev r10-eth1")

# Check multipath on super-spine router r1
logger.info('Check multipath on super-spine router r1')
json_file = '{}/r1/bgp-route-2.json'.format(CWD)
logger.info("Check multipath on super-spine router r1")
json_file = "{}/r1/bgp-route-2.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

# Check weighted ECMP is in effect on the super-spine router r1
logger.info('Check weighted ECMP is in effect on the super-spine router r1')
json_file = '{}/r1/ip-route-1.json'.format(CWD)
logger.info("Check weighted ECMP is in effect on the super-spine router r1")
json_file = "{}/r1/ip-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg
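The weighted-ECMP expectation in this test is that each next hop's weight scales with its advertised link bandwidth rather than being equal. As a toy illustration only (not the exact zebra computation), normalizing per-path bandwidths into the kernel's 1-255 nexthop weight range might look like:

# Toy normalization: scale per-nexthop bandwidths into 1..255 weights.
def ecmp_weights(bandwidths, max_weight=255):
    top = max(bandwidths)
    return [max(1, round(bw * max_weight / top)) for bw in bandwidths]

# e.g. ecmp_weights([10, 20, 10]) -> [128, 255, 128] (hypothetical values)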


def test_weighted_ecmp_link_flap():
"Test #4: Test weighted ECMP rebalancing upon change (link flap)"
logger.info('\nTest #4: Test weighted ECMP rebalancing upon change (link flap)')
logger.info("\nTest #4: Test weighted ECMP rebalancing upon change (link flap)")

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r2 = tgen.gears['r2']
r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]

# Bring down link on server r9
logger.info('Bring down link on server r9')
logger.info("Bring down link on server r9")

tgen.net['r9'].cmd('ip link set dev r9-eth1 down')
tgen.net["r9"].cmd("ip link set dev r9-eth1 down")

# Check spine router r2 has only one path
logger.info('Check spine router r2 has only one path')
logger.info("Check spine router r2 has only one path")

json_file = '{}/r2/ip-route-3.json'.format(CWD)
json_file = "{}/r2/ip-route-3.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r2, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r2, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on spine router r2'
assertmsg = "JSON output mismatch on spine router r2"
assert result is None, assertmsg

# Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1
logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1')
logger.info(
"Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1"
)

json_file = '{}/r1/bgp-route-3.json'.format(CWD)
json_file = "{}/r1/bgp-route-3.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

json_file = '{}/r1/ip-route-2.json'.format(CWD)
json_file = "{}/r1/ip-route-2.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

# Bring up link on server r9
logger.info('Bring up link on server r9')
logger.info("Bring up link on server r9")

tgen.net['r9'].cmd('ip link set dev r9-eth1 up')
tgen.net["r9"].cmd("ip link set dev r9-eth1 up")

# Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1
logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1')
logger.info(
"Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1"
)

json_file = '{}/r1/bgp-route-2.json'.format(CWD)
json_file = "{}/r1/bgp-route-2.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

json_file = '{}/r1/ip-route-1.json'.format(CWD)
json_file = "{}/r1/ip-route-1.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg


def test_weighted_ecmp_second_anycast_ip():
"Test #5: Test weighted ECMP for a second anycast IP"
logger.info('\nTest #5: Test weighted ECMP for a second anycast IP')
logger.info("\nTest #5: Test weighted ECMP for a second anycast IP")

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r2 = tgen.gears['r2']
r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]

# Configure anycast IP on additional server r7, r9 and r10
logger.info('Configure anycast IP on server r7, r9 and r10')
logger.info("Configure anycast IP on server r7, r9 and r10")

tgen.net['r7'].cmd('ip addr add 198.10.1.11/32 dev r7-eth1')
tgen.net['r9'].cmd('ip addr add 198.10.1.11/32 dev r9-eth1')
tgen.net['r10'].cmd('ip addr add 198.10.1.11/32 dev r10-eth1')
tgen.net["r7"].cmd("ip addr add 198.10.1.11/32 dev r7-eth1")
tgen.net["r9"].cmd("ip addr add 198.10.1.11/32 dev r9-eth1")
tgen.net["r10"].cmd("ip addr add 198.10.1.11/32 dev r10-eth1")

# Check link-bandwidth and weighted ECMP on super-spine router r1
logger.info('Check link-bandwidth and weighted ECMP on super-spine router r1')
logger.info("Check link-bandwidth and weighted ECMP on super-spine router r1")

json_file = '{}/r1/bgp-route-4.json'.format(CWD)
json_file = "{}/r1/bgp-route-4.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show bgp ipv4 uni 198.10.1.11/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.11/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

json_file = '{}/r1/ip-route-3.json'.format(CWD)
json_file = "{}/r1/ip-route-3.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.11/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg


def test_paths_with_and_without_linkbw():
"Test #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP"
logger.info('\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP')
logger.info(
"\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP"
)

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r1 = tgen.gears["r1"]

# Configure leaf router r6 to not advertise any link-bandwidth
logger.info('Configure leaf router r6 to not advertise any link-bandwidth')
logger.info("Configure leaf router r6 to not advertise any link-bandwidth")

tgen.net['r6'].cmd('vtysh -c \"conf t\" -c \"router bgp 65303\" -c \"address-family ipv4 unicast\" -c \"no neighbor 11.1.3.1 route-map anycast_ip out\"')
tgen.net["r6"].cmd(
'vtysh -c "conf t" -c "router bgp 65303" -c "address-family ipv4 unicast" -c "no neighbor 11.1.3.1 route-map anycast_ip out"'
)

# Check link-bandwidth change on super-spine router r1
logger.info('Check link-bandwidth change on super-spine router r1')
logger.info("Check link-bandwidth change on super-spine router r1")

json_file = '{}/r1/bgp-route-5.json'.format(CWD)
json_file = "{}/r1/bgp-route-5.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show bgp ipv4 uni 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

# Check super-spine router r1 resorts to regular ECMP
logger.info('Check super-spine router r1 resorts to regular ECMP')
logger.info("Check super-spine router r1 resorts to regular ECMP")

json_file = '{}/r1/ip-route-4.json'.format(CWD)
json_file = "{}/r1/ip-route-4.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

json_file = '{}/r1/ip-route-5.json'.format(CWD)
json_file = "{}/r1/ip-route-5.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.11/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg


def test_linkbw_handling_options():
"Test #7: Test different options for processing link-bandwidth on the receiver"
logger.info('\nTest #7: Test different options for processing link-bandwidth on the receiver')
logger.info(
"\nTest #7: Test different options for processing link-bandwidth on the receiver"
)

tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip('skipped because of router(s) failure')
pytest.skip("skipped because of router(s) failure")

r1 = tgen.gears['r1']
r1 = tgen.gears["r1"]

# Configure super-spine r1 to skip multipaths without link-bandwidth
logger.info('Configure super-spine r1 to skip multipaths without link-bandwidth')
logger.info("Configure super-spine r1 to skip multipaths without link-bandwidth")

tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth skip-missing\"')
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65101" -c "bgp bestpath bandwidth skip-missing"'
)

# Check super-spine router r1 resorts to only one path as other path is skipped
logger.info('Check super-spine router r1 resorts to only one path as other path is skipped')
logger.info(
"Check super-spine router r1 resorts to only one path as other path is skipped"
)

json_file = '{}/r1/ip-route-6.json'.format(CWD)
json_file = "{}/r1/ip-route-6.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

json_file = '{}/r1/ip-route-7.json'.format(CWD)
json_file = "{}/r1/ip-route-7.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.11/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

# Configure super-spine r1 to use default-weight for multipaths without link-bandwidth
logger.info('Configure super-spine r1 to use default-weight for multipaths without link-bandwidth')
logger.info(
"Configure super-spine r1 to use default-weight for multipaths without link-bandwidth"
)

tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth default-weight-for-missing\"')
tgen.net["r1"].cmd(
'vtysh -c "conf t" -c "router bgp 65101" -c "bgp bestpath bandwidth default-weight-for-missing"'
)

# Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth
logger.info('Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth')
logger.info(
"Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth"
)

json_file = '{}/r1/ip-route-8.json'.format(CWD)
json_file = "{}/r1/ip-route-8.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.1/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.1/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg

json_file = '{}/r1/ip-route-9.json'.format(CWD)
json_file = "{}/r1/ip-route-9.json".format(CWD)
expected = json.loads(open(json_file).read())
test_func = partial(topotest.router_json_cmp,
r1, 'show ip route 198.10.1.11/32 json', expected)
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 198.10.1.11/32 json", expected
)
_, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
assertmsg = 'JSON output mismatch on super-spine router r1'
assertmsg = "JSON output mismatch on super-spine router r1"
assert result is None, assertmsg
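Test #7 exercises the receiver-side knobs for paths lacking link-bandwidth; the test applies them one after the other through vtysh, along the lines of the following sketch (option names as used in the test; consult the BGP documentation for the full set):

# Sketch: apply the two handling options the way this test drives them.
for option in ("skip-missing", "default-weight-for-missing"):
    tgen.net["r1"].cmd(
        'vtysh -c "conf t" -c "router bgp 65101"'
        ' -c "bgp bestpath bandwidth {}"'.format(option)
    )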

if __name__ == '__main__':

if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))

@ -132,7 +132,7 @@ from lib.common_config import (
create_bgp_community_lists,
check_router_status,
apply_raw_config,
required_linux_kernel_version
required_linux_kernel_version,
)

from lib.topolog import logger
@ -211,7 +211,7 @@ def setup_module(mod):
* `mod`: module name
"""
# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")

@ -693,10 +693,12 @@ def test_static_routes_associated_to_specific_vrfs_p0(request):
)

step(
"Verify that static routes 1.x.x.x/32 and 1::x/128 appear " "in VRF BLUE_A table"
"Verify that static routes 1.x.x.x/32 and 1::x/128 appear "
"in VRF BLUE_A table"
)
step(
"Verify that static routes 2.x.x.x/32 and 2::x/128 appear " "in VRF BLUE_B table"
"Verify that static routes 2.x.x.x/32 and 2::x/128 appear "
"in VRF BLUE_B table"
)

for addr_type in ADDR_TYPES:

@ -78,7 +78,7 @@ from lib.common_config import (
get_frr_ipv6_linklocal,
check_router_status,
apply_raw_config,
required_linux_kernel_version
required_linux_kernel_version,
)

from lib.topolog import logger
@ -143,7 +143,7 @@ def setup_module(mod):
* `mod`: module name
"""
# Required linux kernel version for this suite to run.
result = required_linux_kernel_version('4.15')
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")

@ -91,7 +91,7 @@ jsonFile = "{}/bgp_recursive_route_ebgp_multi_hop.json".format(CWD)
try:
with open(jsonFile, "r") as topoJson:
topo = json.load(topoJson)
except IOError :
except IOError:
logger.info("Could not read file:", jsonFile)

# Global variables
@ -284,7 +284,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
input_dict_4,
next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0],
)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)
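A note on the assertion messages in this hunk: the format string carries a single {} but two arguments, so str.format fills the placeholder with tc_name and silently discards result (the wording also drops the testcase name from "Testcase :"). black only rewraps the call; it does not change that behavior. In isolation:

# str.format ignores surplus positional arguments:
msg = "Testcase : Failed \n Error : {}".format("tc42", "some error")
print(msg)  # the single {} receives "tc42"; "some error" is dropped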

step(
"Configure a static routes for next hop IP on R2 via multiple"
@ -317,7 +319,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
}
}
result = create_static_routes(tgen, input_dict_3)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

step("verify if redistributed routes are now installed in FIB of R2")
result = verify_rib(
@ -328,7 +332,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0],
protocol="bgp",
)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

step("Delete 1 route from static recursive for the next-hop IP")
dut = "r2"
@ -345,7 +351,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
}
}
result = create_static_routes(tgen, input_dict_3)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

step("Verify that redistributed routes are withdrawn from FIB of R2")
result = verify_rib(
@ -355,7 +363,7 @@ def test_recursive_routes_iBGP_peer_p1(request):
input_dict_4,
next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0],
protocol="bgp",
expected=False
expected=False,
)
assert result is not True, "Testcase : Failed \n Error : {}".format(
tc_name, result
@ -375,7 +383,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
}
}
result = create_static_routes(tgen, input_dict_3)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

step("Verify that redistributed routes are again installed" "in FIB of R2")
result = verify_rib(
@ -386,7 +396,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
next_hop=topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0],
protocol="bgp",
)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

step("Configure static route with changed next-hop from same subnet")
for addr_type in ADDR_TYPES:
@ -410,7 +422,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
}
}
result = create_static_routes(tgen, input_dict_4)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

result = verify_rib(tgen, addr_type, "r1", input_dict_4, protocol="static")
assert result is True, "Testcase {} : Failed \n Error : {}".format(
@ -455,7 +469,9 @@ def test_recursive_routes_iBGP_peer_p1(request):
}
}
result = create_static_routes(tgen, input_dict_4)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

result = verify_rib(tgen, addr_type, "r1", input_dict_4, protocol="static")
assert result is True, "Testcase {} : Failed \n Error : {}".format(
@ -578,7 +594,7 @@ def test_next_hop_as_self_ip_p1(request):
"r2",
input_dict_4,
next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0],
expected=False
expected=False,
)
assert result is not True, "Testcase : Failed \n Error : {}".format(
tc_name, result
@ -614,7 +630,9 @@ def test_next_hop_as_self_ip_p1(request):
input_dict_4,
next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0],
)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)

step("No shutdown interface on R2 which was shut in previous step")
intf_r2_r4 = topo["routers"]["r2"]["links"]["r4"]["interface"]
@ -644,14 +662,16 @@ def test_next_hop_as_self_ip_p1(request):
input_dict_4,
next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0],
)
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
assert result is True, "Testcase : Failed \n Error : {}".format(
tc_name, result
)
result = verify_rib(
tgen,
addr_type,
"r2",
input_dict_4,
next_hop=topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0],
expected=False
expected=False,
)
assert result is not True, "Testcase : Failed \n Error : {}".format(
tc_name, result
@ -907,7 +927,9 @@ def test_next_hop_with_recursive_lookup_p1(request):
result = verify_bgp_convergence_from_running_config(tgen, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(tc_name, result)
), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(
tc_name, result
)
logger.info("Expected behaviour: {}".format(result))
|
||||
|
||||
for addr_type in ADDR_TYPES:
|
||||
@ -1018,7 +1040,7 @@ def test_next_hop_with_recursive_lookup_p1(request):
|
||||
input_dict,
|
||||
protocol="bgp",
|
||||
next_hop=next_hop,
|
||||
expected=False
|
||||
expected=False,
|
||||
)
|
||||
assert result is not True, (
|
||||
"Testcase {} : Failed \n "
|
||||
@ -1083,7 +1105,9 @@ def test_next_hop_with_recursive_lookup_p1(request):
|
||||
result = verify_bgp_convergence_from_running_config(tgen, expected=False)
|
||||
assert (
|
||||
result is not True
|
||||
), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(tc_name, result)
|
||||
), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
logger.info("Expected behaviour: {}".format(result))
|
||||
|
||||
for addr_type in ADDR_TYPES:
|
||||
@ -1099,7 +1123,7 @@ def test_next_hop_with_recursive_lookup_p1(request):
|
||||
input_dict,
|
||||
protocol="bgp",
|
||||
next_hop=next_hop,
|
||||
expected=False
|
||||
expected=False,
|
||||
)
|
||||
assert result is not True, (
|
||||
"Testcase {} : Failed \n "
|
||||
@ -1138,7 +1162,9 @@ def test_next_hop_with_recursive_lookup_p1(request):
|
||||
result = verify_bgp_convergence_from_running_config(tgen, expected=False)
|
||||
assert (
|
||||
result is not True
|
||||
), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(tc_name, result)
|
||||
), "Testcase {} : Failed \n" "BGP is converged \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
logger.info("Expected behaviour: {}".format(result))
|
||||
|
||||
for addr_type in ADDR_TYPES:
|
||||
@ -1154,7 +1180,7 @@ def test_next_hop_with_recursive_lookup_p1(request):
|
||||
input_dict,
|
||||
protocol="bgp",
|
||||
next_hop=next_hop,
|
||||
expected=False
|
||||
expected=False,
|
||||
)
|
||||
assert result is not True, (
|
||||
"Testcase {} : Failed \n "
|
||||
@ -1237,7 +1263,9 @@ def test_BGP_path_attributes_default_values_p1(request):
|
||||
topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0],
|
||||
],
|
||||
)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
|
||||
for addr_type in ADDR_TYPES:
|
||||
input_dict_4 = {
|
||||
@ -1256,7 +1284,9 @@ def test_BGP_path_attributes_default_values_p1(request):
|
||||
rmap_name="rmap_pf",
|
||||
input_dict=input_dict_4,
|
||||
)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
|
||||
step(
|
||||
"Configure a route-map to set below attribute value as 500"
|
||||
@ -1358,7 +1388,9 @@ def test_BGP_path_attributes_default_values_p1(request):
|
||||
rmap_name="rmap_pf",
|
||||
input_dict=input_dict_4,
|
||||
)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
|
||||
step("Remove the route-map from R4")
|
||||
input_dict_5 = {
|
||||
@ -1432,7 +1464,9 @@ def test_BGP_path_attributes_default_values_p1(request):
|
||||
input_dict=input_dict_4,
|
||||
nexthop=None,
|
||||
)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
|
||||
write_test_footer(tc_name)
|
||||
|
||||
@ -1670,7 +1704,7 @@ def test_BGP_peering_bw_loopback_and_physical_p1(request):
|
||||
input_dict_1,
|
||||
protocol="static",
|
||||
next_hop=topo["routers"]["r1"]["links"]["r3"][addr_type].split("/")[0],
|
||||
expected=False
|
||||
expected=False,
|
||||
)
|
||||
assert result is not True, "Testcase {} : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
@ -1801,7 +1835,9 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request):
|
||||
topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0],
|
||||
],
|
||||
)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(tc_name, result)
|
||||
assert result is True, "Testcase : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
)
|
||||
|
||||
step(
|
||||
"Configure a route-map to set as-path attribute and"
|
||||
@ -2037,7 +2073,7 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request):
|
||||
topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0],
|
||||
topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0],
|
||||
],
|
||||
expected=False
|
||||
expected=False,
|
||||
)
|
||||
assert result is not True, "Testcase {} : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
@ -2084,7 +2120,7 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request):
|
||||
topo["routers"]["r2"]["links"]["r4"][addr_type].split("/")[0],
|
||||
topo["routers"]["r3"]["links"]["r4"][addr_type].split("/")[0],
|
||||
],
|
||||
expected=False
|
||||
expected=False,
|
||||
)
|
||||
assert result is not True, "Testcase {} : Failed \n Error : {}".format(
|
||||
tc_name, result
|
||||
|
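The hunks above all make the same two mechanical changes: black adds a trailing comma after the last argument of any call it has to split across lines (so `expected=False` becomes `expected=False,`), and it re-wraps asserts that overflow its default 88-column limit. A minimal sketch of the before/after shapes, using a hypothetical verify() helper rather than the real topotest functions:

def verify(name, value, expected=True):
    # Illustrative stand-in for the verify_rib()-style helpers above.
    return value == expected


# Before black, the closing argument carried no trailing comma:
#     result = verify("r1",
#                     True,
#                     expected=False
#     )
# After black: one argument per line, each followed by a comma.
result = verify(
    "r1",
    True,
    expected=False,
)
assert result is not True, "verify unexpectedly succeeded: {}".format(result)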
@ -149,22 +149,21 @@ def test_bgp_update_delay():

def _bgp_check_update_delay_in_progress(router):
output = json.loads(router.vtysh_cmd("show ip bgp sum json"))
expected = {"ipv4Unicast": {"updateDelayInProgress":True}}
expected = {"ipv4Unicast": {"updateDelayInProgress": True}}

return topotest.json_cmp(output, expected)

def _bgp_check_route_install(router):
output = json.loads(router.vtysh_cmd("show ip route 172.16.253.254/32 json"))
expected = {"172.16.253.254/32": [ {"protocol": "bgp"}]}
expected = {"172.16.253.254/32": [{"protocol": "bgp"}]}

return topotest.json_cmp(output, expected)

def _bgp_check_update_delay_and_wait(router):
output = json.loads(router.vtysh_cmd("show ip bgp sum json"))
expected = {
"ipv4Unicast": {
"updateDelayLimit": 20,
"updateDelayEstablishWait": 10}}
"ipv4Unicast": {"updateDelayLimit": 20, "updateDelayEstablishWait": 10}
}

return topotest.json_cmp(output, expected)

@ -177,14 +176,11 @@ def test_bgp_update_delay():
def _bgp_check_vrf_update_delay_and_wait(router):
output = json.loads(router.vtysh_cmd("show ip bgp vrf vrf1 sum json"))
expected = {
"ipv4Unicast": {
"updateDelayLimit": 20,
"updateDelayEstablishWait": 10}}

"ipv4Unicast": {"updateDelayLimit": 20, "updateDelayEstablishWait": 10}
}

return topotest.json_cmp(output, expected)


# Check r2 initial convergence in default table
test_func = functools.partial(_bgp_converge, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
@ -215,13 +211,17 @@ def test_bgp_update_delay():
test_func = functools.partial(_bgp_check_update_delay_in_progress, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to set update-delay max-delay timer "{}"'.format(router2)
assert result is None, 'Failed to set update-delay max-delay timer "{}"'.format(
router2
)

# Check that r2 only installs route learned from r4 after the max-delay timer expires
test_func = functools.partial(_bgp_check_route_install, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to install route after update-delay "{}"'.format(router2)
assert result is None, 'Failed to install route after update-delay "{}"'.format(
router2
)

# Define update-delay with max-delay and estabish-wait and check json output showing set
router2.vtysh_cmd(
@ -235,7 +235,9 @@ def test_bgp_update_delay():
test_func = functools.partial(_bgp_check_update_delay_and_wait, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to set max-delay and establish-weight timers in "{}"'.format(router2)
assert (
result is None
), 'Failed to set max-delay and establish-weight timers in "{}"'.format(router2)

# Define update-delay with max-delay and estabish-wait and check json output showing set
router2.vtysh_cmd("""clear ip bgp *""")
@ -243,7 +245,11 @@ def test_bgp_update_delay():
test_func = functools.partial(_bgp_check_route_install, router3)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to installed advertised route after establish-wait timer espired "{}"'.format(router2)
assert (
result is None
), 'Failed to installed advertised route after establish-wait timer espired "{}"'.format(
router2
)

# Remove update-delay timer on r2 to verify that it goes back to normal behavior
router2.vtysh_cmd(
@ -260,7 +266,9 @@ def test_bgp_update_delay():
test_func = functools.partial(_bgp_check_route_install, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to remove update-delay delay timing "{}"'.format(router2)
assert result is None, 'Failed to remove update-delay delay timing "{}"'.format(
router2
)

# Define global bgp update-delay with max-delay and establish-wait on r2
router2.vtysh_cmd(
@ -274,7 +282,9 @@ def test_bgp_update_delay():
test_func = functools.partial(_bgp_check_update_delay_and_wait, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to set update-delay in default instance "{}"'.format(router2)
assert result is None, 'Failed to set update-delay in default instance "{}"'.format(
router2
)

test_func = functools.partial(_bgp_check_vrf_update_delay_and_wait, router2)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
@ -287,7 +297,11 @@ def test_bgp_update_delay():
test_func = functools.partial(_bgp_check_route_install, router3)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=1)

assert result is None, 'Failed to installed advertised route after establish-wait timer espired "{}"'.format(router2)
assert (
result is None
), 'Failed to installed advertised route after establish-wait timer espired "{}"'.format(
router2
)


if __name__ == "__main__":
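Every check in this test follows the same polling idiom: the router is bound into a zero-argument callable with functools.partial, and topotest.run_and_expect retries it until it returns the expected value or the attempt budget runs out. A self-contained sketch of that idiom under assumed names (poll_until and check_converged are illustrative stand-ins, not the real topotest API):

import functools
import time


def poll_until(func, expected, count=30, wait=1):
    # Retry func() until it returns expected; mirrors run_and_expect's shape.
    result = func()
    for _ in range(count - 1):
        if result == expected:
            break
        time.sleep(wait)
        result = func()
    return result == expected, result


def check_converged(state):
    # Return None on success, a diff string otherwise (json_cmp convention).
    return None if state.get("converged") else "peer not established"


test_func = functools.partial(check_converged, {"converged": True})
success, result = poll_until(test_func, None, count=3, wait=0)
assert result is None, "convergence check failed: {}".format(result)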
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -71,7 +71,7 @@ from lib.common_config import (
configure_brctl,
apply_raw_config,
verify_vrf_vni,
verify_cli_json
verify_cli_json,
)

from lib.topolog import logger
@ -81,7 +81,7 @@ from lib.bgp import (
clear_bgp,
verify_best_path_as_per_bgp_attribute,
verify_attributes_for_evpn_routes,
verify_evpn_routes
verify_evpn_routes,
)
from lib.topojson import build_topo_from_json, build_config_from_json

@ -177,9 +177,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)

if version_cmp(platform.release(), '4.19') < 0:
error_msg = ('EVPN tests will not run (have kernel "{}", '
'but it requires >= 4.19)'.format(platform.release()))
if version_cmp(platform.release(), "4.19") < 0:
error_msg = (
'EVPN tests will not run (have kernel "{}", '
"but it requires >= 4.19)".format(platform.release())
)
pytest.skip(error_msg)

global BGP_CONVERGENCE
@ -389,9 +391,9 @@ def test_verify_overlay_index_p1(request):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -463,7 +465,7 @@ def test_evpn_cli_json_available_p1(request):
"cli": [
"show evpn vni detail",
"show bgp l2vpn evpn all overlay",
"show bgp l2vpn evpn vni"
"show bgp l2vpn evpn vni",
]
}
}
@ -516,9 +518,9 @@ def test_RT_verification_auto_p0(request):
"network": NETWORK4_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
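The setup_module hunk above gates the whole EVPN module on the host kernel: version_cmp compares platform.release() against "4.19", and anything older skips the module. A self-contained sketch of that gate; version_tuple here stands in for the tree's version_cmp helper and is an assumption, not the real implementation:

import platform

import pytest


def version_tuple(release):
    # "5.15.0-91-generic" -> (5, 15); enough for a major.minor comparison.
    return tuple(int(part) for part in release.split("-")[0].split(".")[:2])


def require_kernel(minimum="4.19"):
    if version_tuple(platform.release()) < version_tuple(minimum):
        error_msg = (
            'EVPN tests will not run (have kernel "{}", '
            "but it requires >= {})".format(platform.release(), minimum)
        )
        pytest.skip(error_msg)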
@ -77,7 +77,7 @@ from lib.common_config import (
configure_vxlan,
configure_brctl,
verify_vrf_vni,
create_interface_in_kernel
create_interface_in_kernel,
)

from lib.topolog import logger
@ -87,7 +87,7 @@ from lib.bgp import (
clear_bgp,
verify_best_path_as_per_bgp_attribute,
verify_attributes_for_evpn_routes,
verify_evpn_routes
verify_evpn_routes,
)
from lib.topojson import build_topo_from_json, build_config_from_json

@ -179,9 +179,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)

if version_cmp(platform.release(), '4.19') < 0:
error_msg = ('EVPN tests will not run (have kernel "{}", '
'but it requires >= 4.19)'.format(platform.release()))
if version_cmp(platform.release(), "4.19") < 0:
error_msg = (
'EVPN tests will not run (have kernel "{}", '
"but it requires >= 4.19)".format(platform.release())
)
pytest.skip(error_msg)

global BGP_CONVERGENCE
@ -387,9 +389,9 @@ def test_RD_verification_manual_and_auto_p0(request):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -453,7 +455,7 @@ def test_RD_verification_manual_and_auto_p0(request):
"vrf": "RED",
"address_family": {
"l2vpn": {"evpn": {"rd": "100.100.100.100:100"}}
}
},
}
]
}
@ -620,9 +622,9 @@ def test_RT_verification_manual_p0(request):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -652,7 +654,7 @@ def test_RT_verification_manual_p0(request):
"l2vpn": {
"evpn": {"route-target": {"export": [{"value": "100:100"}]}}
},
}
},
}
]
}
@ -995,9 +997,9 @@ def test_active_standby_evpn_implementation_p1(request):
"network": NETWORK1_4[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -1249,9 +1251,9 @@ def test_evpn_routes_from_VNFs_p1(request):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -1382,9 +1384,9 @@ def test_evpn_routes_from_VNFs_p1(request):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -1617,9 +1619,9 @@ def test_route_map_operations_for_evpn_address_family_p1(request, attribute):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
@ -1811,9 +1813,9 @@ def test_bgp_attributes_for_evpn_address_family_p1(request, attribute):
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
}
},
]
}
},
}

result = create_static_routes(tgen, input_dict_1)
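Each of the static-route hunks above closes the same nested input shape handed to create_static_routes(): a router name mapping to a "static_routes" list of per-route dicts carrying network, next_hop, and vrf. The key layout below is inferred from the brace structure in the hunks and from similar topotests, so treat it as an assumption about the lib.common_config schema rather than its documented contract:

def build_static_route_input(router, networks, next_hop, vrf):
    # Assemble one create_static_routes()-style input dict.
    return {
        router: {
            "static_routes": [
                {"network": net, "next_hop": next_hop, "vrf": vrf}
                for net in networks
            ]
        }
    }


# Hypothetical router and prefix values, mirroring the NETWORK3_1/GREEN pattern:
input_dict_1 = build_static_route_input(
    "e1", ["10.1.1.1/32"], "192.168.1.10", "GREEN"
)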
@ -73,7 +73,7 @@ from functools import partial

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, '../'))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
@ -84,8 +84,10 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo


class TemplateTopo(Topo):
"Test topology builder"

def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
@ -93,44 +95,45 @@ class TemplateTopo(Topo):
#
# Define FRR Routers
#
for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
tgen.add_router(router)

#
# Define connections
#
switch = tgen.add_switch('s1')
switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1")
switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1")
switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1")
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")

switch = tgen.add_switch('s2')
switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-1")
switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-1")
switch = tgen.add_switch("s2")
switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")

switch = tgen.add_switch('s3')
switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-2")
switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-2")
switch = tgen.add_switch("s3")
switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")

switch = tgen.add_switch('s4')
switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-1")
switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-1")
switch = tgen.add_switch("s4")
switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")

switch = tgen.add_switch('s5')
switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-2")
switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-2")
switch = tgen.add_switch("s5")
switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")

switch = tgen.add_switch('s6')
switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5")
switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4")
switch = tgen.add_switch("s6")
switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")

switch = tgen.add_switch('s7')
switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6")
switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4")
switch = tgen.add_switch("s7")
switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")

switch = tgen.add_switch("s8")
switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")

switch = tgen.add_switch('s8')
switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6")
switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5")

def setup_module(mod):
"Sets up the pytest environment"
@ -142,16 +145,15 @@ def setup_module(mod):
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, '{}/zebra.conf'.format(rname))
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
TopoRouter.RD_ISIS,
os.path.join(CWD, '{}/isisd.conf'.format(rname))
TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)

tgen.start_router()


def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
@ -159,22 +161,23 @@ def teardown_module(mod):
# This function tears down the whole topology.
tgen.stop_topology()


def router_compare_json_output(rname, command, reference):
"Compare router JSON output"

logger.info('Comparing router "%s" "%s" output', rname, command)

tgen = get_topogen()
filename = '{}/{}/{}'.format(CWD, rname, reference)
filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())

# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(topotest.router_json_cmp,
tgen.gears[rname], command, expected)
test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
_, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg


#
# Step 1
#
@ -188,9 +191,13 @@ def test_isis_adjacencies_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step1/show_yang_interface_isis_adjacencies.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step1/show_yang_interface_isis_adjacencies.ref",
)


def test_rib_ipv4_step1():
logger.info("Test (step 1): verify IPv4 RIB")
@ -200,9 +207,11 @@ def test_rib_ipv4_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step1/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step1/show_ip_route.ref"
)


def test_rib_ipv6_step1():
logger.info("Test (step 1): verify IPv6 RIB")
@ -212,9 +221,11 @@ def test_rib_ipv6_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step1/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref"
)


def test_mpls_lib_step1():
logger.info("Test (step 1): verify MPLS LIB")
@ -224,9 +235,11 @@ def test_mpls_lib_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step1/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step1/show_mpls_table.ref"
)


#
# Step 2
@ -252,13 +265,21 @@ def test_isis_adjacencies_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Disabling IS-IS on the eth-rt5 interface on rt4')
tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no ip router isis 1"')
tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no ipv6 router isis 1"')
logger.info("Disabling IS-IS on the eth-rt5 interface on rt4")
tgen.net["rt4"].cmd(
'vtysh -c "conf t" -c "interface eth-rt5" -c "no ip router isis 1"'
)
tgen.net["rt4"].cmd(
'vtysh -c "conf t" -c "interface eth-rt5" -c "no ipv6 router isis 1"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step2/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step2/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step2():
logger.info("Test (step 2): verify IPv4 RIB")
@ -268,9 +289,11 @@ def test_rib_ipv4_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step2/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step2/show_ip_route.ref"
)


def test_rib_ipv6_step2():
logger.info("Test (step 2): verify IPv6 RIB")
@ -280,9 +303,11 @@ def test_rib_ipv6_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step2/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step2/show_ipv6_route.ref"
)


def test_mpls_lib_step2():
logger.info("Test (step 2): verify MPLS LIB")
@ -292,9 +317,11 @@ def test_mpls_lib_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step2/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step2/show_mpls_table.ref"
)


#
# Step 3
@ -318,14 +345,18 @@ def test_isis_adjacencies_step3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Shutting down the eth-rt4 interface on rt6')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "shutdown"')
logger.info('Shutting down the eth-rt5 interface on rt6')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "shutdown"')
logger.info("Shutting down the eth-rt4 interface on rt6")
tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "shutdown"')
logger.info("Shutting down the eth-rt5 interface on rt6")
tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "shutdown"')

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step3/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step3/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step3():
logger.info("Test (step 3): verify IPv4 RIB")
@ -335,9 +366,11 @@ def test_rib_ipv4_step3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step3/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step3/show_ip_route.ref"
)


def test_rib_ipv6_step3():
logger.info("Test (step 3): verify IPv6 RIB")
@ -347,9 +380,11 @@ def test_rib_ipv6_step3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step3/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step3/show_ipv6_route.ref"
)


def test_mpls_lib_step3():
logger.info("Test (step 3): verify MPLS LIB")
@ -359,9 +394,11 @@ def test_mpls_lib_step3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step3/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step3/show_mpls_table.ref"
)


#
# Step 4
@ -386,16 +423,22 @@ def test_isis_adjacencies_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Bringing up the eth-rt4 interface on rt6')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "no shutdown"')
logger.info('Bringing up the eth-rt5 interface on rt6')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no shutdown"')
logger.info('Changing rt6\'s SRGB')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 18000 25999"')
logger.info("Bringing up the eth-rt4 interface on rt6")
tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "no shutdown"')
logger.info("Bringing up the eth-rt5 interface on rt6")
tgen.net["rt6"].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no shutdown"')
logger.info("Changing rt6's SRGB")
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 18000 25999"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step4/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step4/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step4():
logger.info("Test (step 4): verify IPv4 RIB")
@ -405,9 +448,11 @@ def test_rib_ipv4_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step4/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step4/show_ip_route.ref"
)


def test_rib_ipv6_step4():
logger.info("Test (step 4): verify IPv6 RIB")
@ -417,9 +462,11 @@ def test_rib_ipv6_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step4/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step4/show_ipv6_route.ref"
)


def test_mpls_lib_step4():
logger.info("Test (step 4): verify MPLS LIB")
@ -429,9 +476,11 @@ def test_mpls_lib_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step4/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step4/show_mpls_table.ref"
)


#
# Step 5
@ -453,12 +502,18 @@ def test_isis_adjacencies_step5():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Disabling SR on rt6')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing on"')
logger.info("Disabling SR on rt6")
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing on"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step5/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step5/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step5():
logger.info("Test (step 5): verify IPv4 RIB")
@ -468,9 +523,11 @@ def test_rib_ipv4_step5():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step5/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step5/show_ip_route.ref"
)


def test_rib_ipv6_step5():
logger.info("Test (step 5): verify IPv6 RIB")
@ -480,9 +537,11 @@ def test_rib_ipv6_step5():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step5/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step5/show_ipv6_route.ref"
)


def test_mpls_lib_step5():
logger.info("Test (step 5): verify MPLS LIB")
@ -492,9 +551,11 @@ def test_mpls_lib_step5():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step5/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step5/show_mpls_table.ref"
)


#
# Step 6
@ -516,12 +577,16 @@ def test_isis_adjacencies_step6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Enabling SR on rt6')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing on"')
logger.info("Enabling SR on rt6")
tgen.net["rt6"].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing on"')

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step6/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step6/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step6():
logger.info("Test (step 6): verify IPv4 RIB")
@ -531,9 +596,11 @@ def test_rib_ipv4_step6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step6/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step6/show_ip_route.ref"
)


def test_rib_ipv6_step6():
logger.info("Test (step 6): verify IPv6 RIB")
@ -543,9 +610,11 @@ def test_rib_ipv6_step6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step6/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step6/show_ipv6_route.ref"
)


def test_mpls_lib_step6():
logger.info("Test (step 6): verify MPLS LIB")
@ -555,9 +624,11 @@ def test_mpls_lib_step6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step6/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step6/show_mpls_table.ref"
)


#
# Step 7
@ -576,13 +647,21 @@ def test_isis_adjacencies_step7():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Deleting rt1\'s Prefix-SIDs')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 1.1.1.1/32 index 10"')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 2001:db8:1000::1/128 index 11"')
logger.info("Deleting rt1's Prefix-SIDs")
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 1.1.1.1/32 index 10"'
)
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 2001:db8:1000::1/128 index 11"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step7/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step7/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step7():
logger.info("Test (step 7): verify IPv4 RIB")
@ -592,9 +671,11 @@ def test_rib_ipv4_step7():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step7/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step7/show_ip_route.ref"
)


def test_rib_ipv6_step7():
logger.info("Test (step 7): verify IPv6 RIB")
@ -604,9 +685,11 @@ def test_rib_ipv6_step7():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step7/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step7/show_ipv6_route.ref"
)


def test_mpls_lib_step7():
logger.info("Test (step 7): verify MPLS LIB")
@ -616,9 +699,11 @@ def test_mpls_lib_step7():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step7/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step7/show_mpls_table.ref"
)


#
# Step 8
@ -637,13 +722,21 @@ def test_isis_adjacencies_step8():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Re-adding rt1\'s Prefix-SIDs')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"')
logger.info("Re-adding rt1's Prefix-SIDs")
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"'
)
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step8/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step8/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step8():
logger.info("Test (step 8): verify IPv4 RIB")
@ -653,9 +746,11 @@ def test_rib_ipv4_step8():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step8/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step8/show_ip_route.ref"
)


def test_rib_ipv6_step8():
logger.info("Test (step 8): verify IPv6 RIB")
@ -665,9 +760,11 @@ def test_rib_ipv6_step8():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step8/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step8/show_ipv6_route.ref"
)


def test_mpls_lib_step8():
logger.info("Test (step 8): verify MPLS LIB")
@ -677,9 +774,11 @@ def test_mpls_lib_step8():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step8/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step8/show_mpls_table.ref"
)


#
# Step 9
@ -700,16 +799,28 @@ def test_isis_adjacencies_step9():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Changing rt1\'s Prefix-SIDs to use the no-php option')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10 no-php-flag"')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11 no-php-flag"')
logger.info('Change rt6\'s Prefix-SIDs to stop using the explicit-null option')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60"')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61"')
logger.info("Changing rt1's Prefix-SIDs to use the no-php option")
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10 no-php-flag"'
)
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11 no-php-flag"'
)
logger.info("Change rt6's Prefix-SIDs to stop using the explicit-null option")
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60"'
)
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step9/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step9/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step9():
logger.info("Test (step 9): verify IPv4 RIB")
@ -719,9 +830,11 @@ def test_rib_ipv4_step9():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step9/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step9/show_ip_route.ref"
)


def test_rib_ipv6_step9():
logger.info("Test (step 9): verify IPv6 RIB")
@ -731,9 +844,11 @@ def test_rib_ipv6_step9():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step9/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step9/show_ipv6_route.ref"
)


def test_mpls_lib_step9():
logger.info("Test (step 9): verify MPLS LIB")
@ -743,9 +858,11 @@ def test_mpls_lib_step9():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step9/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step9/show_mpls_table.ref"
)


#
# Step 10
@ -766,12 +883,18 @@ def test_isis_adjacencies_step10():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Removing the IPv4 address from rt4\'s eth-rt2-1 interface')
tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt2-1" -c "no ip address 10.0.2.4/24"')
logger.info("Removing the IPv4 address from rt4's eth-rt2-1 interface")
tgen.net["rt4"].cmd(
'vtysh -c "conf t" -c "interface eth-rt2-1" -c "no ip address 10.0.2.4/24"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step10/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step10/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step10():
logger.info("Test (step 10): verify IPv4 RIB")
@ -781,9 +904,11 @@ def test_rib_ipv4_step10():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step10/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step10/show_ip_route.ref"
)


def test_rib_ipv6_step10():
logger.info("Test (step 10): verify IPv6 RIB")
@ -793,9 +918,11 @@ def test_rib_ipv6_step10():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step10/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step10/show_ipv6_route.ref"
)


def test_mpls_lib_step10():
logger.info("Test (step 10): verify MPLS LIB")
@ -805,9 +932,11 @@ def test_mpls_lib_step10():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step10/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step10/show_mpls_table.ref"
)


#
# Step 11
@ -826,13 +955,26 @@ def test_isis_invalid_config_step11():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Entering invalid Segment Routing configuration...')
ret = tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10000"')
assert re.search("Configuration failed", ret) is not None, "Invalid SR configuration wasn't rejected"
ret = tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 14999"')
assert re.search("Configuration failed", ret) is not None, "Invalid SR configuration wasn't rejected"
ret = tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 16001"')
assert re.search("Configuration failed", ret) is not None, "Invalid SR configuration wasn't rejected"
logger.info("Entering invalid Segment Routing configuration...")
ret = tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10000"'
)
assert (
re.search("Configuration failed", ret) is not None
), "Invalid SR configuration wasn't rejected"
ret = tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 14999"'
)
assert (
re.search("Configuration failed", ret) is not None
), "Invalid SR configuration wasn't rejected"
ret = tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 16001"'
)
assert (
re.search("Configuration failed", ret) is not None
), "Invalid SR configuration wasn't rejected"


#
# Step 12
@ -851,19 +993,39 @@ def test_isis_adjacencies_step12():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

logger.info('Restoring the original network setup')
tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "ip router isis 1"')
tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "ipv6 router isis 1"')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 23999"')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"')
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60 explicit-null"')
tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61 explicit-null"')
tgen.net['rt4'].cmd('vtysh -c "conf t" -c "interface eth-rt2-1" -c "ip address 10.0.2.4/24"')
logger.info("Restoring the original network setup")
tgen.net["rt4"].cmd(
'vtysh -c "conf t" -c "interface eth-rt5" -c "ip router isis 1"'
)
tgen.net["rt4"].cmd(
'vtysh -c "conf t" -c "interface eth-rt5" -c "ipv6 router isis 1"'
)
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 16000 23999"'
)
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 1.1.1.1/32 index 10"'
)
tgen.net["rt1"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::1/128 index 11"'
)
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 6.6.6.6/32 index 60 explicit-null"'
)
tgen.net["rt6"].cmd(
'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::6/128 index 61 explicit-null"'
)
tgen.net["rt4"].cmd(
'vtysh -c "conf t" -c "interface eth-rt2-1" -c "ip address 10.0.2.4/24"'
)

for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"step1/show_yang_interface_isis_adjacencies.ref",
)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd",
"step1/show_yang_interface_isis_adjacencies.ref")

def test_rib_ipv4_step12():
logger.info("Test (step 12): verify IPv4 RIB")
@ -873,9 +1035,11 @@ def test_rib_ipv4_step12():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ip route isis json",
"step1/show_ip_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step1/show_ip_route.ref"
)


def test_rib_ipv6_step12():
logger.info("Test (step 12): verify IPv6 RIB")
@ -885,9 +1049,11 @@ def test_rib_ipv6_step12():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show ipv6 route isis json",
"step1/show_ipv6_route.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref"
)


def test_mpls_lib_step12():
logger.info("Test (step 12): verify MPLS LIB")
@ -897,19 +1063,22 @@ def test_mpls_lib_step12():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)

for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
router_compare_json_output(rname, "show mpls table json",
"step1/show_mpls_table.ref")
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show mpls table json", "step1/show_mpls_table.ref"
)


# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip('Memory leak test/report is disabled')
pytest.skip("Memory leak test/report is disabled")

tgen.report_memory_leaks()

if __name__ == '__main__':

if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
@@ -82,6 +82,7 @@ class ISISTopo1(Topo):
        sw.add_link(tgen.gears["r4"])
        sw.add_link(tgen.gears["r5"])


def setup_module(mod):
    "Sets up the pytest environment"
    tgen = Topogen(ISISTopo1, mod.__name__)
@@ -129,12 +130,10 @@ def setup_module(mod):

    for rname, router in tgen.routers().items():
        router.load_config(
            TopoRouter.RD_ZEBRA,
            os.path.join(CWD, "{}/zebra.conf".format(rname))
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_ISIS,
            os.path.join(CWD, "{}/isisd.conf".format(rname))
            TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
        )
    # After loading the configurations, this function loads configured daemons.
    tgen.start_router()
@@ -148,6 +147,7 @@ def setup_module(mod):
        logger.info("Skipping ISIS vrf tests for FRR 2.0")
        tgen.set_error("ISIS has convergence problems with IPv6")


def teardown_module(mod):
    "Teardown the pytest environment"
    tgen = get_topogen()
@@ -155,6 +155,7 @@ def teardown_module(mod):
    # delete rx-vrf
    tgen.stop_topology()


def test_isis_convergence():
    "Wait for the protocol to converge before starting to test"
    tgen = get_topogen()
@@ -167,6 +168,7 @@ def test_isis_convergence():
    for rname, router in tgen.routers().items():
        filename = "{0}/{1}/{1}_topology.json".format(CWD, rname)
        expected = json.loads(open(filename).read())

        def compare_isis_topology(router, expected):
            "Helper function to test ISIS vrf topology convergence."
            actual = show_isis_topology(router)
@@ -177,6 +179,7 @@ def test_isis_convergence():
        (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=120)
        assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)

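# compare_isis_topology is polled through topotest.run_and_expect until it
# returns None (no JSON diff) or the retry budget runs out. A minimal sketch
# of that polling pattern, assuming the functools.partial binding used
# elsewhere in these tests:
from functools import partial

test_func = partial(compare_isis_topology, router, expected)
# Re-run test_func every 0.5 seconds, up to 120 times, until it returns None.
(result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=120)
assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)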
def test_isis_route_installation():
    "Check whether all expected routes are present"
    tgen = get_topogen()
@@ -189,7 +192,9 @@ def test_isis_route_installation():
    for rname, router in tgen.routers().items():
        filename = "{0}/{1}/{1}_route.json".format(CWD, rname)
        expected = json.loads(open(filename, "r").read())
        actual = router.vtysh_cmd("show ip route vrf {0}-cust1 json".format(rname) , isjson=True)
        actual = router.vtysh_cmd(
            "show ip route vrf {0}-cust1 json".format(rname), isjson=True
        )
        # Older FRR versions don't list interfaces in some ISIS routes
        if router.has_version("<", "3.1"):
            for network, routes in expected.items():
@@ -209,7 +214,7 @@ def test_isis_linux_route_installation():

    dist = platform.dist()

    if (dist[1] == "16.04"):
    if dist[1] == "16.04":
        pytest.skip("Kernel not supported for vrf")

    "Check whether all expected routes are present and installed in the OS"
@@ -234,6 +239,7 @@ def test_isis_linux_route_installation():
        assertmsg = "Router '{}' OS routes mismatch".format(rname)
        assert topotest.json_cmp(actual, expected) is None, assertmsg


def test_isis_route6_installation():
    "Check whether all expected routes are present"
    tgen = get_topogen()
@@ -246,7 +252,9 @@ def test_isis_route6_installation():
    for rname, router in tgen.routers().items():
        filename = "{0}/{1}/{1}_route6.json".format(CWD, rname)
        expected = json.loads(open(filename, "r").read())
        actual = router.vtysh_cmd("show ipv6 route vrf {}-cust1 json".format(rname) , isjson=True)
        actual = router.vtysh_cmd(
            "show ipv6 route vrf {}-cust1 json".format(rname), isjson=True
        )

        # Older FRR versions don't list interfaces in some ISIS routes
        if router.has_version("<", "3.1"):
@@ -262,11 +270,12 @@ def test_isis_route6_installation():
        assertmsg = "Router '{}' routes mismatch".format(rname)
        assert topotest.json_cmp(actual, expected) is None, assertmsg


def test_isis_linux_route6_installation():

    dist = platform.dist()

    if (dist[1] == "16.04"):
    if dist[1] == "16.04":
        pytest.skip("Kernel not supported for vrf")

    "Check whether all expected routes are present and installed in the OS"
@@ -291,6 +300,7 @@ def test_isis_linux_route6_installation():
        assertmsg = "Router '{}' OS routes mismatch".format(rname)
        assert topotest.json_cmp(actual, expected) is None, assertmsg


def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
@@ -452,4 +462,3 @@ def show_isis_topology(router):

    dict_merge(l1, l2)
    return l1

@@ -182,7 +182,9 @@ def test_isis_convergence():
        router_compare_json_output(
            rname,
            "show yang operational-data /frr-interface:lib isisd",
            "show_yang_interface_isis_adjacencies.ref")
            "show_yang_interface_isis_adjacencies.ref",
        )


def test_rib():
    logger.info("Test: verify RIB")
@@ -265,6 +267,7 @@ def test_ldp_pseudowires():
            rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref"
        )


def test_ldp_igp_sync():
    logger.info("Test: verify LDP igp-sync")
    tgen = get_topogen()
@@ -278,6 +281,7 @@ def test_ldp_igp_sync():
            rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref"
        )


def test_isis_ldp_sync():
    logger.info("Test: verify ISIS igp-sync")
    tgen = get_topogen()
@@ -287,9 +291,7 @@ def test_isis_ldp_sync():
        pytest.skip(tgen.errors)

    for rname in ["r1", "r2", "r3"]:
        (result, diff) = validate_show_isis_ldp_sync(
            rname, "show_isis_ldp_sync.ref"
        )
        (result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref")
        assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)

    for rname in ["r1", "r2", "r3"]:
@@ -320,7 +322,9 @@ def test_r1_eth1_shutdown():

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref"
            rname,
            "show mpls ldp igp-sync json",
            "show_ldp_igp_sync_r1_eth1_shutdown.ref",
        )

    for rname in ["r1", "r2", "r3"]:
@@ -355,9 +359,7 @@ def test_r1_eth1_no_shutdown():
        )

    for rname in ["r1", "r2", "r3"]:
        (result, diff) = validate_show_isis_ldp_sync(
            rname, "show_isis_ldp_sync.ref"
        )
        (result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref")
        assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)

    for rname in ["r1", "r2", "r3"]:
@@ -382,7 +384,9 @@ def test_r2_eth1_shutdown():

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref"
            rname,
            "show mpls ldp igp-sync json",
            "show_ldp_igp_sync_r1_eth1_shutdown.ref",
        )

    for rname in ["r1", "r2", "r3"]:
@@ -417,9 +421,7 @@ def test_r2_eth1_no_shutdown():
        )

    for rname in ["r1", "r2", "r3"]:
        (result, diff) = validate_show_isis_ldp_sync(
            rname, "show_isis_ldp_sync.ref"
        )
        (result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref")
        assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)

    for rname in ["r1", "r2", "r3"]:
@@ -448,6 +450,7 @@ if __name__ == "__main__":
# Auxiliary functions
#


def parse_show_isis_ldp_sync(lines, rname):
    """
    Parse the output of 'show isis mpls ldp sync' into a Python dict.
@@ -461,20 +464,20 @@ def parse_show_isis_ldp_sync(lines, rname):
        interface = {}
        interface_name = None

        line = it.next();
        line = it.next()

        if line.startswith(rname + "-eth"):
            interface_name = line

        line = it.next();
        line = it.next()

        if line.startswith(" LDP-IGP Synchronization enabled: "):
            interface["ldpIgpSyncEnabled"] = line.endswith("yes")
            line = it.next();
            line = it.next()

        if line.startswith(" holddown timer in seconds: "):
            interface["holdDownTimeInSec"] = int(line.split(": ")[-1])
            line = it.next();
            line = it.next()

        if line.startswith(" State: "):
            interface["ldpIgpSyncState"] = line.split(": ")[-1]
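# Note that it.next() is Python 2 iterator syntax; under Python 3 the same
# walk would use the built-in next(). A version-neutral sketch of the line
# walk used above (hypothetical, not part of this commit):
it = iter(lines)
line = next(it)  # equivalent to it.next() on Python 2
while line.startswith(" "):
    # consume indented detail lines belonging to the current interface
    line = next(it)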
@@ -534,7 +537,7 @@ def parse_show_isis_interface_detail(lines, rname):

    while True:
        try:
            line = it.next();
            line = it.next()

            area_match = re.match(r"Area (.+):", line)
            if not area_match:
@@ -543,34 +546,36 @@ def parse_show_isis_interface_detail(lines, rname):
            area_id = area_match.group(1)
            area = {}

            line = it.next();
            line = it.next()

            while line.startswith(" Interface: "):
                interface_name = re.split(':|,', line)[1].lstrip()
                interface_name = re.split(":|,", line)[1].lstrip()

                area[interface_name]= []
                area[interface_name] = []

                # Look for keyword: Level-1 or Level-2
                while not line.startswith(" Level-"):
                    line = it.next();
                    line = it.next()

                while line.startswith(" Level-"):

                    level = {}

                    level_name = line.split()[0]
                    level['level'] = level_name
                    level["level"] = level_name

                    line = it.next();
                    line = it.next()

                    if line.startswith(" Metric:"):
                        level['metric'] = re.split(':|,', line)[1].lstrip()
                        level["metric"] = re.split(":|,", line)[1].lstrip()

                    area[interface_name].append(level)

                    # Look for keyword: Level-1 or Level-2 or Interface:
                    while not line.startswith(" Level-") and not line.startswith(" Interface: "):
                        line = it.next();
                    while not line.startswith(" Level-") and not line.startswith(
                        " Interface: "
                    ):
                        line = it.next()

                    if line.startswith(" Level-"):
                        continue
@@ -623,4 +628,3 @@ def validate_show_isis_interface_detail(rname, fname):
    (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=160)

    return (result, diff)

@@ -264,6 +264,7 @@ def test_ldp_pseudowires():
            rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref"
        )


def test_ldp_igp_sync():
    logger.info("Test: verify LDP igp-sync")
    tgen = get_topogen()
@@ -277,6 +278,7 @@ def test_ldp_igp_sync():
            rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref"
        )


def test_ospf_ldp_sync():
    logger.info("Test: verify OSPF igp-sync")
    tgen = get_topogen()
@@ -317,19 +319,26 @@ def test_r1_eth1_shutdown():

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref"
            rname,
            "show mpls ldp igp-sync json",
            "show_ldp_igp_sync_r1_eth1_shutdown.ref",
        )

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show ip ospf mpls ldp-sync json", "show_ospf_ldp_sync_r1_eth1_shutdown.ref"
            rname,
            "show ip ospf mpls ldp-sync json",
            "show_ospf_ldp_sync_r1_eth1_shutdown.ref",
        )

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show ip ospf interface json", "show_ip_ospf_interface_r1_eth1_shutdown.ref"
            rname,
            "show ip ospf interface json",
            "show_ip_ospf_interface_r1_eth1_shutdown.ref",
        )


def test_r1_eth1_no_shutdown():
    logger.info("Test: verify behaviour after r1-eth1 is no shutdown")
    tgen = get_topogen()
@@ -358,6 +367,7 @@ def test_r1_eth1_no_shutdown():
            rname, "show ip ospf interface json", "show_ip_ospf_interface.ref"
        )


def test_r2_eth1_shutdown():
    logger.info("Test: verify behaviour after r2-eth1 is shutdown")
    tgen = get_topogen()
@@ -373,19 +383,26 @@ def test_r2_eth1_shutdown():

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync_r1_eth1_shutdown.ref"
            rname,
            "show mpls ldp igp-sync json",
            "show_ldp_igp_sync_r1_eth1_shutdown.ref",
        )

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show ip ospf mpls ldp-sync json", "show_ospf_ldp_sync_r2_eth1_shutdown.ref"
            rname,
            "show ip ospf mpls ldp-sync json",
            "show_ospf_ldp_sync_r2_eth1_shutdown.ref",
        )

    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show ip ospf interface json", "show_ip_ospf_interface_r2_eth1_shutdown.ref"
            rname,
            "show ip ospf interface json",
            "show_ip_ospf_interface_r2_eth1_shutdown.ref",
        )


def test_r2_eth1_no_shutdown():
    logger.info("Test: verify behaviour after r2-eth1 is no shutdown")
    tgen = get_topogen()
@@ -414,6 +431,7 @@ def test_r2_eth1_no_shutdown():
            rname, "show ip ospf interface json", "show_ip_ospf_interface.ref"
        )


# Memory leak test template
def test_memory_leak():
    "Run the memory leak test and report results."
@@ -283,8 +283,7 @@ def test_ldp_pseudowires_after_link_down():
    # for nexthop resolution). Give some extra wait time.
    for rname in ["r1", "r2", "r3"]:
        router_compare_json_output(
            rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref",
            count=160, wait=1
            rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref", count=160, wait=1
        )

@@ -40,9 +40,8 @@ import re

# gpz: get rib in json form and compare against desired routes
class BgpRib:

    def log(self, str):
        LUtil.log ("BgpRib: "+ str)
        LUtil.log("BgpRib: " + str)

    def routes_include_wanted(self, pfxtbl, want, debug):
        # helper function to RequireVpnRoutes
@@ -156,7 +155,7 @@ class BgpRib:
            errstr = "-script ERROR: check if vrf missing"
            luResult(target, False, title + errstr, logstr)
            return
        #if debug:
        # if debug:
        #    self.log("table=%s" % table)
        for want in wantroutes:
            if debug:

@@ -400,6 +400,7 @@ def check_router_status(tgen):
    logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
    return True


def getStrIO():
    """
    Return a StringIO object appropriate for the current python version.
@@ -409,6 +410,7 @@ def getStrIO():
    else:
        return StringIO.StringIO()


def reset_config_on_routers(tgen, routerName=None):
    """
    Resets configuration on routers to the snapshot created using input JSON
@@ -702,14 +704,14 @@ def start_topology(tgen, daemon=None):
    )
    TMPDIR = os.path.join(LOGDIR, tgen.modname)

    linux_ver = ''
    linux_ver = ""
    router_list = tgen.routers()
    for rname in ROUTER_LIST:
        router = router_list[rname]

        # It will help in debugging the failures, will give more details on which
        # specific kernel version tests are failing
        if linux_ver == '':
        if linux_ver == "":
            linux_ver = router.run("uname -a")
            logger.info("Logging platform related details: \n %s \n", linux_ver)

@@ -741,11 +743,10 @@ def start_topology(tgen, daemon=None):
            # Loading empty bgpd.conf file to router, to start the bgp daemon
            router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname))

        if daemon and 'ospfd' in daemon:
        if daemon and "ospfd" in daemon:
            # Loading empty ospfd.conf file to router, to start the ospf daemon
            router.load_config(
                TopoRouter.RD_OSPF,
                '{}/{}/ospfd.conf'.format(TMPDIR, rname)
                TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname)
            )
    # Starting routers
    logger.info("Starting all routers once topology is created")
@@ -831,8 +832,8 @@ def topo_daemons(tgen, topo):
    )

    for rtr in ROUTER_LIST:
        if 'ospf' in topo['routers'][rtr] and 'ospfd' not in daemon_list:
            daemon_list.append('ospfd')
        if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list:
            daemon_list.append("ospfd")

    return daemon_list

@@ -1266,8 +1267,7 @@ def interface_status(tgen, topo, input_dict):
    return True


def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0,
          return_is_dict=False):
def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict=False):
    """
    Retries function execution, if return is an errormsg or exception

@@ -1279,11 +1279,10 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0,
    """

    def _retry(func):

        @wraps(func)
        def func_retry(*args, **kwargs):
            _wait = kwargs.pop('wait', wait)
            _attempts = kwargs.pop('attempts', attempts)
            _wait = kwargs.pop("wait", wait)
            _attempts = kwargs.pop("attempts", attempts)
            _attempts = int(_attempts)
            expected = True
            if _attempts < 0:
@@ -1293,19 +1292,21 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0,
                logger.info("Waiting for [%s]s as initial delay", initial_wait)
                sleep(initial_wait)

            _return_is_str = kwargs.pop('return_is_str', return_is_str)
            _return_is_dict = kwargs.pop('return_is_str', return_is_dict)
            _return_is_str = kwargs.pop("return_is_str", return_is_str)
            _return_is_dict = kwargs.pop("return_is_dict", return_is_dict)
            for i in range(1, _attempts + 1):
                try:
                    _expected = kwargs.setdefault('expected', True)
                    _expected = kwargs.setdefault("expected", True)
                    if _expected is False:
                        expected = _expected
                    kwargs.pop('expected')
                    kwargs.pop("expected")
                    ret = func(*args, **kwargs)
                    logger.debug("Function returned %s", ret)
                    if _return_is_str and isinstance(ret, bool) and _expected:
                        return ret
                    if (isinstance(ret, str) or isinstance(ret, unicode)) and _expected is False:
                    if (
                        isinstance(ret, str) or isinstance(ret, unicode)
                    ) and _expected is False:
                        return ret
                    if _return_is_dict and isinstance(ret, dict):
                        return ret
@@ -1316,17 +1317,17 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0,
                except Exception as err:
                    if _attempts == i and expected:
                        generate_support_bundle()
                        logger.info("Max number of attempts (%r) reached",
                                    _attempts)
                        logger.info("Max number of attempts (%r) reached", _attempts)
                        raise
                    else:
                        logger.info("Function returned %s", err)
                        if i < _attempts:
                            logger.info("Retry [#%r] after sleeping for %ss"
                                        % (i, _wait))
                            logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait))
                            sleep(_wait)

        func_retry._original = func
        return func_retry

    return _retry

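# A sketch of how this decorator is consumed by the verification helpers in
# this library (verify_example is a hypothetical function name):
@retry(attempts=3, wait=2, initial_wait=0)
def verify_example(tgen, dut):
    # Return an error string on failure; the decorator re-runs the function
    # until it succeeds or the attempts are exhausted.
    if dut not in tgen.routers():
        return "Router {} not found".format(dut)
    return True

# Callers may override the retry budget per call, since func_retry pops
# "wait" and "attempts" from kwargs before invoking the wrapped function:
# verify_example(tgen, "r1", attempts=5, wait=1)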
@@ -1420,58 +1421,63 @@ def create_interfaces_cfg(tgen, topo, build=False):
            else:
                interface_data.append("ipv6 address {}\n".format(intf_addr))

            if 'ospf' in data:
                ospf_data = data['ospf']
                if 'area' in ospf_data:
                    intf_ospf_area = c_data["links"][destRouterLink][
                        "ospf"]["area"]
            if "ospf" in data:
                ospf_data = data["ospf"]
                if "area" in ospf_data:
                    intf_ospf_area = c_data["links"][destRouterLink]["ospf"]["area"]
                    if "delete" in data and data["delete"]:
                        interface_data.append("no ip ospf area")
                    else:
                        interface_data.append("ip ospf area {}".format(
                            intf_ospf_area
                        ))
                        interface_data.append(
                            "ip ospf area {}".format(intf_ospf_area)
                        )

                if "hello_interval" in ospf_data:
                    intf_ospf_hello = c_data["links"][destRouterLink][
                        "ospf"]["hello_interval"]
                    intf_ospf_hello = c_data["links"][destRouterLink]["ospf"][
                        "hello_interval"
                    ]
                    if "delete" in data and data["delete"]:
                        interface_data.append("no ip ospf "\
                                " hello-interval")
                        interface_data.append("no ip ospf " " hello-interval")
                    else:
                        interface_data.append("ip ospf "\
                                " hello-interval {}".format(intf_ospf_hello))
                        interface_data.append(
                            "ip ospf " " hello-interval {}".format(intf_ospf_hello)
                        )

                if "dead_interval" in ospf_data:
                    intf_ospf_dead = c_data["links"][destRouterLink][
                        "ospf"]["dead_interval"]
                    intf_ospf_dead = c_data["links"][destRouterLink]["ospf"][
                        "dead_interval"
                    ]
                    if "delete" in data and data["delete"]:
                        interface_data.append("no ip ospf"\
                                " dead-interval")
                        interface_data.append("no ip ospf" " dead-interval")
                    else:
                        interface_data.append("ip ospf "\
                                " dead-interval {}".format(intf_ospf_dead))
                        interface_data.append(
                            "ip ospf " " dead-interval {}".format(intf_ospf_dead)
                        )

                if "network" in ospf_data:
                    intf_ospf_nw = c_data["links"][destRouterLink][
                        "ospf"]["network"]
                    intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][
                        "network"
                    ]
                    if "delete" in data and data["delete"]:
                        interface_data.append("no ip ospf"\
                                " network {}".format(intf_ospf_nw))
                        interface_data.append(
                            "no ip ospf" " network {}".format(intf_ospf_nw)
                        )
                    else:
                        interface_data.append("ip ospf"\
                                " network {}".format(intf_ospf_nw))
                        interface_data.append(
                            "ip ospf" " network {}".format(intf_ospf_nw)
                        )

                if "priority" in ospf_data:
                    intf_ospf_nw = c_data["links"][destRouterLink][
                        "ospf"]["priority"]
                    intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][
                        "priority"
                    ]

                    if "delete" in data and data["delete"]:
                        interface_data.append("no ip ospf"\
                                " priority")
                        interface_data.append("no ip ospf" " priority")
                    else:
                        interface_data.append("ip ospf"\
                                " priority {}".format(intf_ospf_nw))
                        interface_data.append(
                            "ip ospf" " priority {}".format(intf_ospf_nw)
                        )
        result = create_common_configuration(
            tgen, c_router, interface_data, "interface_config", build=build
        )
@@ -3013,7 +3019,7 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None):

            for st_rt in ip_list:
                st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
                #st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
                # st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))

                _addr_type = validate_ip_address(st_rt)
                if _addr_type != addr_type:
@@ -3118,7 +3124,7 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None):
            nh_found = False

            for st_rt in ip_list:
                #st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
                # st_rt = str(ipaddr.IPNetwork(unicode(st_rt)))
                st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))

                _addr_type = validate_ip_address(st_rt)
@@ -4075,8 +4081,9 @@ def required_linux_kernel_version(required_version):
    """
    system_kernel = platform.release()
    if version_cmp(system_kernel, required_version) < 0:
        error_msg = ('These tests will not run on kernel "{}", '
                     'they require kernel >= {})'.format(system_kernel,
                                                         required_version ))
        error_msg = (
            'These tests will not run on kernel "{}", '
            "they require kernel >= {})".format(system_kernel, required_version)
        )
        return error_msg
    return True
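# A sketch of the intended call pattern at the top of a test module: hand a
# non-True return value straight to pytest.skip (the version value is
# illustrative):
result = required_linux_kernel_version("4.15")
if result is not True:
    pytest.skip(result)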
@@ -43,7 +43,8 @@ from mininet.topo import Topo

customize = None

class LTemplate():

class LTemplate:
    test = None
    testdir = None
    scriptdir = None
@@ -54,12 +55,12 @@ class LTemplate():

    def __init__(self, test, testdir):
        global customize
        customize = imp.load_source('customize', os.path.join(testdir, 'customize.py'))
        customize = imp.load_source("customize", os.path.join(testdir, "customize.py"))
        self.test = test
        self.testdir = testdir
        self.scriptdir = testdir
        self.logdir = '/tmp/topotests/{0}.test_{0}'.format(test)
        logger.info('LTemplate: '+test)
        self.logdir = "/tmp/topotests/{0}.test_{0}".format(test)
        logger.info("LTemplate: " + test)

    def setup_module(self, mod):
        "Sets up the pytest environment"
@@ -68,14 +69,14 @@ class LTemplate():
        # ... and here it calls Mininet initialization functions.
        tgen.start_topology()

        logger.info('Topology started')
        logger.info("Topology started")
        try:
            self.prestarthooksuccess = customize.ltemplatePreRouterStartHook()
        except AttributeError:
            #not defined
            # not defined
            logger.debug("ltemplatePreRouterStartHook() not defined")
        if self.prestarthooksuccess != True:
            logger.info('ltemplatePreRouterStartHook() failed, skipping test')
            logger.info("ltemplatePreRouterStartHook() failed, skipping test")
            return

        # This is a sample of configuration loading.
@@ -85,48 +86,57 @@ class LTemplate():
        for rname, router in router_list.items():
            logger.info("Setting up %s" % rname)
            for rd_val in TopoRouter.RD:
                config = os.path.join(self.testdir, '{}/{}.conf'.format(rname,TopoRouter.RD[rd_val]))
                config = os.path.join(
                    self.testdir, "{}/{}.conf".format(rname, TopoRouter.RD[rd_val])
                )
                prog = os.path.join(tgen.net[rname].daemondir, TopoRouter.RD[rd_val])
                if os.path.exists(config):
                    if os.path.exists(prog):
                        router.load_config(rd_val, config)
                    else:
                        logger.warning("{} not found, but have {}.conf file".format(prog, TopoRouter.RD[rd_val]))
                        logger.warning(
                            "{} not found, but have {}.conf file".format(
                                prog, TopoRouter.RD[rd_val]
                            )
                        )

        # After loading the configurations, this function loads configured daemons.
        logger.info('Starting routers')
        logger.info("Starting routers")
        tgen.start_router()
        try:
            self.poststarthooksuccess = customize.ltemplatePostRouterStartHook()
        except AttributeError:
            #not defined
            # not defined
            logger.debug("ltemplatePostRouterStartHook() not defined")
        luStart(baseScriptDir=self.scriptdir, baseLogDir=self.logdir, net=tgen.net)

#initialized by ltemplate_start

# initialized by ltemplate_start
_lt = None


def setup_module(mod):
    global _lt
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    test = mod.__name__[:mod.__name__.rfind(".")]
    test = mod.__name__[: mod.__name__.rfind(".")]
    testdir = os.path.join(root, test)

    #don't do this for now as reload didn't work as expected
    #fixup sys.path, want test dir there only once
    #try:
    # don't do this for now as reload didn't work as expected
    # fixup sys.path, want test dir there only once
    # try:
    #    sys.path.remove(testdir)
    #except ValueError:
    # except ValueError:
    #    logger.debug(testdir+" not found in original sys.path")
    #add testdir
    #sys.path.append(testdir)
    # add testdir
    # sys.path.append(testdir)

    #init class
    # init class
    _lt = LTemplate(test, testdir)
    _lt.setup_module(mod)

    #drop testdir
    #sys.path.remove(testdir)
    # drop testdir
    # sys.path.remove(testdir)


def teardown_module(mod):
    global _lt
@@ -141,7 +151,10 @@ def teardown_module(mod):
    tgen.stop_topology()
    _lt = None

def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, KeepGoing=False):

def ltemplateTest(
    script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, KeepGoing=False
):
    global _lt
    if _lt == None or _lt.prestarthooksuccess != True:
        return
@@ -149,8 +162,8 @@ def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None,
    tgen = get_topogen()
    if not os.path.isfile(script):
        if not os.path.isfile(os.path.join(_lt.scriptdir, script)):
            logger.error('Could not find script file: ' + script)
            assert 'Could not find script file: ' + script
            logger.error("Could not find script file: " + script)
            assert False, "Could not find script file: " + script
    logger.info("Starting template test: " + script)
    numEntry = luNumFail()

@@ -163,7 +176,7 @@ def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None,
    if CheckFuncStr != None:
        check = eval(CheckFuncStr)
        if check != True:
            pytest.skip("Check function '"+CheckFuncStr+"' returned: " + check)
            pytest.skip("Check function '" + CheckFuncStr + "' returned: " + check)

    if CallOnFail != None:
        CallOnFail = eval(CallOnFail)
@@ -173,22 +186,26 @@ def ltemplateTest(script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None,
        luShowFail()
        fatal_error = "%d tests failed" % numFail
        if not KeepGoing:
            assert "scripts/cleanup_all.py failed" == "See summary output above", fatal_error
            assert (
                "scripts/cleanup_all.py failed" == "See summary output above"
            ), fatal_error


# Memory leak test template
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
    if not tgen.is_memleak_enabled():
        pytest.skip('Memory leak test/report is disabled')
        pytest.skip("Memory leak test/report is disabled")

    tgen.report_memory_leaks()

class ltemplateRtrCmd():

class ltemplateRtrCmd:
    def __init__(self):
        self.resetCounts()

    def doCmd(self, tgen, rtr, cmd, checkstr = None):
    def doCmd(self, tgen, rtr, cmd, checkstr=None):
        output = tgen.net[rtr].cmd(cmd).strip()
        if len(output):
            self.output += 1
@@ -199,8 +216,8 @@ class ltemplateRtrCmd():
            else:
                self.match += 1
            return ret
        logger.info('command: {} {}'.format(rtr, cmd))
        logger.info('output: ' + output)
        logger.info("command: {} {}".format(rtr, cmd))
        logger.info("output: " + output)
        self.none += 1
        return None

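# A sketch of how doCmd is used: run a shell command on a router and,
# optionally, require a regex match in its output (router name and command
# are illustrative):
cc = ltemplateRtrCmd()
found = cc.doCmd(tgen, "r1", "ip route show", checkstr="10.0.0.0/24")
if found is None:
    logger.info("no output, or the check string did not match")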
@@ -222,63 +239,69 @@ class ltemplateRtrCmd():
    def getNone(self):
        return self.none

def ltemplateVersionCheck(vstr, rname='r1', compstr='<',cli=False, kernel='4.9', iproute2=None, mpls=True):

def ltemplateVersionCheck(
    vstr, rname="r1", compstr="<", cli=False, kernel="4.9", iproute2=None, mpls=True
):
    tgen = get_topogen()
    router = tgen.gears[rname]

    if cli:
        logger.info('calling mininet CLI')
        logger.info("calling mininet CLI")
        tgen.mininet_cli()
        logger.info('exited mininet CLI')
        logger.info("exited mininet CLI")

    if _lt == None:
        ret = 'Template not initialized'
        ret = "Template not initialized"
        return ret

    if _lt.prestarthooksuccess != True:
        ret = 'ltemplatePreRouterStartHook failed'
        ret = "ltemplatePreRouterStartHook failed"
        return ret

    if _lt.poststarthooksuccess != True:
        ret = 'ltemplatePostRouterStartHook failed'
        ret = "ltemplatePostRouterStartHook failed"
        return ret

    if mpls == True and tgen.hasmpls != True:
        ret = 'MPLS not initialized'
        ret = "MPLS not initialized"
        return ret

    if kernel != None:
        krel = platform.release()
        if topotest.version_cmp(krel, kernel) < 0:
            ret = 'Skipping tests, old kernel ({} < {})'.format(krel, kernel)
            ret = "Skipping tests, old kernel ({} < {})".format(krel, kernel)
            return ret

    if iproute2 != None:
        if _lt.iproute2Ver == None:
            #collect/log info on iproute2
            # collect/log info on iproute2
            cc = ltemplateRtrCmd()
            found = cc.doCmd(tgen, rname, 'apt-cache policy iproute2', 'Installed: ([\d\.]*)')
            found = cc.doCmd(
                tgen, rname, "apt-cache policy iproute2", "Installed: ([\d\.]*)"
            )
            if found != None:
                iproute2Ver = found.group(1)
            else:
                iproute2Ver = '0-unknown'
            logger.info('Have iproute2 version=' + iproute2Ver)
                iproute2Ver = "0-unknown"
            logger.info("Have iproute2 version=" + iproute2Ver)

        if topotest.version_cmp(iproute2Ver, iproute2) < 0:
            ret = 'Skipping tests, old iproute2 ({} < {})'.format(iproute2Ver, iproute2)
            ret = "Skipping tests, old iproute2 ({} < {})".format(iproute2Ver, iproute2)
            return ret

    ret = True
    try:
        if router.has_version(compstr, vstr):
            ret = 'Skipping tests, old FRR version {} {}'.format(compstr, vstr)
            ret = "Skipping tests, old FRR version {} {}".format(compstr, vstr)
            return ret
    except:
        ret = True

    return ret

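# A sketch of a typical guard at the top of an ltemplate-based test module;
# any non-True return value is a skip reason (the version strings are
# illustrative):
ret = ltemplateVersionCheck("4.1", kernel="4.9", mpls=True)
if ret != True:
    pytest.skip(ret)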
#for testing
if __name__ == '__main__':

# for testing
if __name__ == "__main__":
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))

@@ -32,46 +32,53 @@ from mininet.net import Mininet
# These functions are intended to provide support for CI testing within MiniNet
# environments.


class lUtil:
    #to be made configurable in the future
    base_script_dir = '.'
    base_log_dir = '.'
    fout_name = 'output.log'
    fsum_name = 'summary.txt'
    # to be made configurable in the future
    base_script_dir = "."
    base_log_dir = "."
    fout_name = "output.log"
    fsum_name = "summary.txt"
    l_level = 6
    CallOnFail = False

    l_total = 0
    l_pass = 0
    l_fail = 0
    l_filename = ''
    l_filename = ""
    l_last = None
    l_line = 0
    l_dotall_experiment = False
    l_last_nl = None

    fout = ''
    fsum = ''
    net = ''
    fout = ""
    fsum = ""
    net = ""

    def log(self, str, level=6):
        if self.l_level > 0:
            if self.fout == '':
                self.fout = open(self.fout_name, 'w', 0)
            self.fout.write(str+'\n')
            if self.fout == "":
                self.fout = open(self.fout_name, "w", 0)
            self.fout.write(str + "\n")
        if level <= self.l_level:
            print(str)

    def summary(self, str):
        if self.fsum == '':
            self.fsum = open(self.fsum_name, 'w', 0)
            self.fsum.write('\
******************************************************************************\n')
            self.fsum.write('\
Test Target Summary                                             Pass Fail\n')
            self.fsum.write('\
******************************************************************************\n')
        self.fsum.write(str+'\n')
        if self.fsum == "":
            self.fsum = open(self.fsum_name, "w", 0)
            self.fsum.write(
                "\
******************************************************************************\n"
            )
            self.fsum.write(
                "\
Test Target Summary                                             Pass Fail\n"
            )
            self.fsum.write(
                "\
******************************************************************************\n"
            )
        self.fsum.write(str + "\n")

    def result(self, target, success, str, logstr=None):
        if success:
@@ -88,32 +95,34 @@ Test Target Summary                                             Pass Fail\n
        if logstr != None:
            self.log("R:%d %s: %s" % (self.l_total, sstr, logstr))
        res = "%-4d %-6s %-56s %-4d %d" % (self.l_total, target, str, p, f)
        self.log ('R:'+res)
        self.log("R:" + res)
        self.summary(res)
        if f == 1 and self.CallOnFail != False:
            self.CallOnFail()

    def closeFiles(self):
        ret = '\
        ret = (
            "\
******************************************************************************\n\
Total %-4d                                                      %-4d %d\n\
******************************************************************************'\
        % (self.l_total, self.l_pass, self.l_fail)
        if self.fsum != '':
            self.fsum.write(ret + '\n')
******************************************************************************"
            % (self.l_total, self.l_pass, self.l_fail)
        )
        if self.fsum != "":
            self.fsum.write(ret + "\n")
            self.fsum.close()
            self.fsum = ''
        if self.fout != '':
            self.fsum = ""
        if self.fout != "":
            if os.path.isfile(self.fsum_name):
                r = open(self.fsum_name, 'r')
                r = open(self.fsum_name, "r")
                self.fout.write(r.read())
                r.close()
            self.fout.close()
            self.fout = ''
            self.fout = ""
        return ret

    def setFilename(self, name):
        str = 'FILE: ' + name
        str = "FILE: " + name
        self.log(str)
        self.summary(str)
        self.l_filename = name
@@ -128,19 +137,19 @@ Total %-4d %-4d %d\n\
    def strToArray(self, string):
        a = []
        c = 0
        end = ''
        end = ""
        words = string.split()
        if len(words) < 1 or words[0].startswith('#'):
        if len(words) < 1 or words[0].startswith("#"):
            return a
        words = string.split()
        for word in words:
            if len(end) == 0:
                a.append(word)
            else:
                a[c] += str(' '+word)
            if end == '\\':
                end = ''
            if not word.endswith('\\'):
                a[c] += str(" " + word)
            if end == "\\":
                end = ""
            if not word.endswith("\\"):
                if end != '"':
                    if word.startswith('"'):
                        end = '"'
@@ -148,12 +157,12 @@ Total %-4d %-4d %d\n\
                        c += 1
                else:
                    if word.endswith('"'):
                        end = ''
                        end = ""
                        c += 1
                    else:
                        c += 1
            else:
                end = '\\'
                end = "\\"
        # if len(end) == 0:
        #    print('%d:%s:' % (c, a[c-1]))

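# strToArray tokenizes one line of a test script, keeping double-quoted
# arguments and backslash-continued words together as single tokens. A
# sketch of the intended behavior (the line content is illustrative):
a = lUtil().strToArray('r1 "show ip route" pass "route found"')
# expected shape: ['r1', '"show ip route"', 'pass', '"route found"']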
@@ -169,27 +178,37 @@ Total %-4d %-4d %d\n\
                    luCommand(a[1], a[2], a[3], a[4], a[5])
                else:
                    self.l_line += 1
                    self.log('%s:%s %s' % (self.l_filename, self.l_line , line))
                    self.log("%s:%s %s" % (self.l_filename, self.l_line, line))
                    if len(a) >= 2:
                        if a[0] == 'sleep':
                        if a[0] == "sleep":
                            time.sleep(int(a[1]))
                        elif a[0] == 'include':
                        elif a[0] == "include":
                            self.execTestFile(a[1])
            f.close()
        else:
            self.log('unable to read: ' + tstFile)
            self.log("unable to read: " + tstFile)
            sys.exit(1)

    def command(self, target, command, regexp, op, result, returnJson):
        global net
        if op != 'wait':
        if op != "wait":
            self.l_line += 1
        self.log('(#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:' % \
                 (self.l_total+1,
                  self.l_filename, self.l_line, target, command, regexp, op, result))
        if self.net == '':
        self.log(
            "(#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:"
            % (
                self.l_total + 1,
                self.l_filename,
                self.l_line,
                target,
                command,
                regexp,
                op,
                result,
            )
        )
        if self.net == "":
            return False
        #self.log("Running %s %s" % (target, command))
        # self.log("Running %s %s" % (target, command))
        js = None
        out = self.net[target].cmd(command).rstrip()
        if len(out) == 0:
@@ -201,13 +220,15 @@ Total %-4d %-4d %d\n\
                js = json.loads(out)
            except:
                js = None
                self.log('WARNING: JSON load failed -- confirm command output is in JSON format.')
        self.log('COMMAND OUTPUT:%s:' % report)
                self.log(
                    "WARNING: JSON load failed -- confirm command output is in JSON format."
                )
        self.log("COMMAND OUTPUT:%s:" % report)

        # Experiment: can we achieve the same match behavior via DOTALL
        # without converting newlines to spaces?
        out_nl = out
        search_nl = re.search(regexp, out_nl, re.DOTALL);
        search_nl = re.search(regexp, out_nl, re.DOTALL)
        self.l_last_nl = search_nl
        # Set up for comparison
        if search_nl != None:
@@ -220,32 +241,50 @@ Total %-4d %-4d %d\n\
        search = re.search(regexp, out)
        self.l_last = search
        if search == None:
            if op == 'fail':
            if op == "fail":
                success = True
            else:
                success = False
            ret = success
        else:
            ret = search.group()
            if op != 'fail':
            if op != "fail":
                success = True
                level = 7
            else:
                success = False
                level = 5
            self.log('found:%s:' % ret, level)
            self.log("found:%s:" % ret, level)
            # Experiment: compare matched strings obtained each way
            if self.l_dotall_experiment and (group_nl_converted != ret):
                self.log('DOTALL experiment: strings differ dotall=[%s] orig=[%s]' % (group_nl_converted, ret), 9)
        if op == 'pass' or op == 'fail':
                self.log(
                    "DOTALL experiment: strings differ dotall=[%s] orig=[%s]"
                    % (group_nl_converted, ret),
                    9,
                )
        if op == "pass" or op == "fail":
            self.result(target, success, result)
        if js != None:
            return js
        return ret

    def wait(self, target, command, regexp, op, result, wait, returnJson, wait_time=0.5):
        self.log('%s:%s WAIT:%s:%s:%s:%s:%s:%s:%s:' % \
                 (self.l_filename, self.l_line, target, command, regexp, op, result,wait,wait_time))
    def wait(
        self, target, command, regexp, op, result, wait, returnJson, wait_time=0.5
    ):
        self.log(
            "%s:%s WAIT:%s:%s:%s:%s:%s:%s:%s:"
            % (
                self.l_filename,
                self.l_line,
                target,
                command,
                regexp,
                op,
                result,
                wait,
                wait_time,
            )
        )
        found = False
        n = 0
        startt = time.time()
@@ -264,103 +303,137 @@ Total %-4d %-4d %d\n\
            time.sleep(wait_time)

        delta = time.time() - startt
        self.log('Done after %d loops, time=%s, Found=%s' % (n, delta, found))
        found = self.command(target, command, regexp, 'pass', '%s +%4.2f secs' % (result, delta), returnJson)
        self.log("Done after %d loops, time=%s, Found=%s" % (n, delta, found))
        found = self.command(
            target,
            command,
            regexp,
            "pass",
            "%s +%4.2f secs" % (result, delta),
            returnJson,
        )
        return found

#initialized by luStart
LUtil=None

#entry calls
def luStart(baseScriptDir='.', baseLogDir='.', net='',
            fout='output.log', fsum='summary.txt', level=None):
# initialized by luStart
LUtil = None

# entry calls
def luStart(
    baseScriptDir=".",
    baseLogDir=".",
    net="",
    fout="output.log",
    fsum="summary.txt",
    level=None,
):
    global LUtil
    #init class
    LUtil=lUtil()
    # init class
    LUtil = lUtil()
    LUtil.base_script_dir = baseScriptDir
    LUtil.base_log_dir = baseLogDir
    LUtil.net = net
    if fout != '':
        LUtil.fout_name = baseLogDir + '/' + fout
    if fout != "":
        LUtil.fout_name = baseLogDir + "/" + fout
    if fsum != None:
        LUtil.fsum_name = baseLogDir + '/' + fsum
        LUtil.fsum_name = baseLogDir + "/" + fsum
    if level != None:
        LUtil.l_level = level
    LUtil.l_dotall_experiment = False
    LUtil.l_dotall_experiment = True

def luCommand(target, command, regexp='.', op='none', result='', time=10, returnJson=False, wait_time=0.5):
    if op != 'wait':

def luCommand(
    target,
    command,
    regexp=".",
    op="none",
    result="",
    time=10,
    returnJson=False,
    wait_time=0.5,
):
    if op != "wait":
        return LUtil.command(target, command, regexp, op, result, returnJson)
    else:
        return LUtil.wait(target, command, regexp, op, result, time, returnJson, wait_time)
        return LUtil.wait(
            target, command, regexp, op, result, time, returnJson, wait_time
        )

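# A sketch of typical luCommand calls from a test script driven through
# luInclude (router names, commands and regexps are illustrative):
luCommand("r1", "ping 10.0.0.2 -c 1", " 0% packet loss", "pass", "ping ok")
# With op="wait", the command is re-run until the regexp matches or the
# `time` budget (seconds) expires:
luCommand("r2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "BGP up", 90)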
def luLast(usenl=False):
    if usenl:
        if LUtil.l_last_nl != None:
            LUtil.log('luLast:%s:' % LUtil.l_last_nl.group(), 7)
            LUtil.log("luLast:%s:" % LUtil.l_last_nl.group(), 7)
        return LUtil.l_last_nl
    else:
        if LUtil.l_last != None:
            LUtil.log('luLast:%s:' % LUtil.l_last.group(), 7)
            LUtil.log("luLast:%s:" % LUtil.l_last.group(), 7)
        return LUtil.l_last


def luInclude(filename, CallOnFail=None):
    tstFile = LUtil.base_script_dir + '/' + filename
    tstFile = LUtil.base_script_dir + "/" + filename
    LUtil.setFilename(filename)
    if CallOnFail != None:
        oldCallOnFail = LUtil.getCallOnFail()
        LUtil.setCallOnFail(CallOnFail)
    if filename.endswith('.py'):
        LUtil.log("luInclude: execfile "+tstFile)
    if filename.endswith(".py"):
        LUtil.log("luInclude: execfile " + tstFile)
        execfile(tstFile)
    else:
        LUtil.log("luInclude: execTestFile "+tstFile)
        LUtil.log("luInclude: execTestFile " + tstFile)
        LUtil.execTestFile(tstFile)
    if CallOnFail != None:
        LUtil.setCallOnFail(oldCallOnFail)


def luFinish():
    global LUtil
    ret = LUtil.closeFiles()
    #done
    # done
    LUtil = None
    return ret;
    return ret


def luNumFail():
    return LUtil.l_fail


def luNumPass():
    return LUtil.l_pass


def luResult(target, success, str, logstr=None):
    return LUtil.result(target, success, str, logstr)


def luShowResults(prFunction):
    printed = 0
    sf = open(LUtil.fsum_name, 'r')
    sf = open(LUtil.fsum_name, "r")
    for line in sf:
        printed+=1
        printed += 1
        prFunction(line.rstrip())
    sf.close()


def luShowFail():
    printed = 0
    sf = open(LUtil.fsum_name, 'r')
    sf = open(LUtil.fsum_name, "r")
    for line in sf:
        if line[-2] != "0":
            printed+=1
            printed += 1
            logger.error(line.rstrip())
    sf.close()
    if printed > 0:
        logger.error("See %s for details of errors" % LUtil.fout_name)

#for testing
if __name__ == '__main__':
    print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/lib')

# for testing
if __name__ == "__main__":
    print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/lib")
    luStart()
    for arg in sys.argv[1:]:
        luInclude(arg)
    luFinish()
    sys.exit(0)

File diff suppressed because it is too large
@@ -296,7 +296,7 @@ def test_json_list_ordered():
    ]

    dsub1 = [
        '__ordered__',
        "__ordered__",
        "some string",
        {"id": 1, "value": "abc"},
        123,
@@ -312,28 +312,28 @@ def test_json_list_exact_matching():
        {"id": 1, "value": "abc"},
        "some string",
        123,
        [1,2,3],
        [1, 2, 3],
    ]

    dsub1 = [
        "some string",
        {"id": 1, "value": "abc"},
        123,
        [1,2,3],
        [1, 2, 3],
    ]

    dsub2 = [
        {"id": 1},
        "some string",
        123,
        [1,2,3],
        [1, 2, 3],
    ]

    dsub3 = [
        {"id": 1, "value": "abc"},
        "some string",
        123,
        [1,3,2],
        [1, 3, 2],
    ]

    assert json_cmp(dcomplete, dsub1, exact=True) is not None
@@ -344,30 +344,30 @@ def test_json_object_exact_matching():
    "Test JSON object on exact matching using the 'exact' parameter."

    dcomplete = {
        'a': {"id": 1, "value": "abc"},
        'b': "some string",
        'c': 123,
        'd': [1,2,3],
        "a": {"id": 1, "value": "abc"},
        "b": "some string",
        "c": 123,
        "d": [1, 2, 3],
    }

    dsub1 = {
        'a': {"id": 1, "value": "abc"},
        'c': 123,
        'd': [1,2,3],
        "a": {"id": 1, "value": "abc"},
        "c": 123,
        "d": [1, 2, 3],
    }

    dsub2 = {
        'a': {"id": 1},
        'b': "some string",
        'c': 123,
        'd': [1,2,3],
        "a": {"id": 1},
        "b": "some string",
        "c": 123,
        "d": [1, 2, 3],
    }

    dsub3 = {
        'a': {"id": 1, "value": "abc"},
        'b': "some string",
        'c': 123,
        'd': [1,3],
        "a": {"id": 1, "value": "abc"},
        "b": "some string",
        "c": 123,
        "d": [1, 3],
    }

    assert json_cmp(dcomplete, dsub1, exact=True) is not None
@@ -382,35 +382,35 @@ def test_json_list_asterisk_matching():
        {"id": 1, "value": "abc"},
        "some string",
        123,
        [1,2,3],
        [1, 2, 3],
    ]

    dsub1 = [
        '*',
        "*",
        "some string",
        123,
        [1,2,3],
        [1, 2, 3],
    ]

    dsub2 = [
        {"id": '*', "value": "abc"},
        {"id": "*", "value": "abc"},
        "some string",
        123,
        [1,2,3],
        [1, 2, 3],
    ]

    dsub3 = [
        {"id": 1, "value": "abc"},
        "some string",
        123,
        [1,'*',3],
        [1, "*", 3],
    ]

    dsub4 = [
        '*',
        "*",
        "some string",
        '*',
        [1,2,3],
        "*",
        [1, 2, 3],
    ]

    assert json_cmp(dcomplete, dsub1) is None
@@ -423,38 +423,38 @@ def test_json_object_asterisk_matching():
    "Test JSON object value elements on matching '*' as a placeholder for arbitrary data."

    dcomplete = {
        'a': {"id": 1, "value": "abc"},
        'b': "some string",
        'c': 123,
        'd': [1,2,3],
        "a": {"id": 1, "value": "abc"},
        "b": "some string",
        "c": 123,
        "d": [1, 2, 3],
    }

    dsub1 = {
        'a': '*',
        'b': "some string",
        'c': 123,
        'd': [1,2,3],
        "a": "*",
        "b": "some string",
        "c": 123,
        "d": [1, 2, 3],
    }

    dsub2 = {
        'a': {"id": 1, "value": "abc"},
        'b': "some string",
        'c': 123,
        'd': [1,'*',3],
        "a": {"id": 1, "value": "abc"},
        "b": "some string",
        "c": 123,
        "d": [1, "*", 3],
    }

    dsub3 = {
        'a': {"id": '*', "value": "abc"},
        'b': "some string",
        'c': 123,
        'd': [1,2,3],
        "a": {"id": "*", "value": "abc"},
        "b": "some string",
        "c": 123,
        "d": [1, 2, 3],
    }

    dsub4 = {
        'a': '*',
        'b': "some string",
        'c': '*',
        'd': [1,2,3],
        "a": "*",
        "b": "some string",
        "c": "*",
        "d": [1, 2, 3],
    }

    assert json_cmp(dcomplete, dsub1) is None
@@ -465,37 +465,12 @@ def test_json_object_asterisk_matching():

def test_json_list_nested_with_objects():

    dcomplete = [
        {
            "key": 1,
            "list": [
                123
            ]
        },
        {
            "key": 2,
            "list": [
                123
            ]
        }
    ]
    dcomplete = [{"key": 1, "list": [123]}, {"key": 2, "list": [123]}]

    dsub1 = [
        {
            "key": 2,
            "list": [
                123
            ]
        },
        {
            "key": 1,
            "list": [
                123
            ]
        }
    ]
    dsub1 = [{"key": 2, "list": [123]}, {"key": 1, "list": [123]}]

    assert json_cmp(dcomplete, dsub1) is None

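# json_cmp returns None on a successful (subset) match and a diff description
# otherwise, which is why the assertions above compare against None. A short
# sketch of the call shape used throughout the topotests:
data = {"a": {"id": 1}, "d": [1, 2, 3]}
assert json_cmp(data, {"a": {"id": 1}}) is None  # subset matches
assert json_cmp(data, {"a": {"id": 2}}) is not None  # mismatch is reported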
if __name__ == "__main__":
    sys.exit(pytest.main())

@@ -703,11 +703,9 @@ class TopoRouter(TopoGear):
        Stop router, private internal version
        * Kill daemons
        """
        self.logger.debug("stopping: wait {}, assert {}".format(
            wait, assertOnError))
        self.logger.debug("stopping: wait {}, assert {}".format(wait, assertOnError))
        return self.tgen.net[self.name].stopRouter(wait, assertOnError)


    def stop(self):
        """
        Stop router cleanly:
@@ -724,7 +722,7 @@ class TopoRouter(TopoGear):
        * Start daemons (e.g. FRR)
        * Configure daemon logging files
        """
        self.logger.debug('starting')
        self.logger.debug("starting")
        nrouter = self.tgen.net[self.name]
        result = nrouter.startRouterDaemons(daemons)

@@ -734,10 +732,12 @@ class TopoRouter(TopoGear):
        for d in daemons:
            if enabled == 0:
                continue
            self.vtysh_cmd('configure terminal\nlog commands\nlog file {}.log'.\
                           format(daemon), daemon=daemon)
            self.vtysh_cmd(
                "configure terminal\nlog commands\nlog file {}.log".format(daemon),
                daemon=daemon,
            )

        if result != '':
        if result != "":
            self.tgen.set_error(result)

        return result

|
||||
Kill specific daemon(user defined daemon only)
|
||||
forcefully using SIGKILL
|
||||
"""
|
||||
self.logger.debug('Killing daemons using SIGKILL..')
|
||||
self.logger.debug("Killing daemons using SIGKILL..")
|
||||
return self.tgen.net[self.name].killRouterDaemons(daemons, wait, assertOnError)
|
||||
|
||||
def vtysh_cmd(self, command, isjson=False, daemon=None):
|
||||
@ -1070,7 +1070,7 @@ def diagnose_env_linux():
|
||||
"isisd",
|
||||
"pimd",
|
||||
"ldpd",
|
||||
"pbrd"
|
||||
"pbrd",
|
||||
]:
|
||||
path = os.path.join(frrdir, fname)
|
||||
if not os.path.isfile(path):
|
||||
|
@@ -45,6 +45,7 @@ from lib.common_config import (

 from lib.bgp import create_router_bgp
+from lib.ospf import create_router_ospf

 ROUTER_LIST = []


@@ -214,13 +215,14 @@ def build_topo_from_json(tgen, topo):
     while listSwitches != []:
         curSwitch = listSwitches.pop(0)
         # Physical Interfaces
-        if "links" in topo['switches'][curSwitch]:
+        if "links" in topo["switches"][curSwitch]:
             for destRouterLink, data in sorted(
-                    topo['switches'][curSwitch]['links'].items()):
+                topo["switches"][curSwitch]["links"].items()
+            ):

                 # Loopback interfaces
                 if "dst_node" in data:
-                    destRouter = data['dst_node']
+                    destRouter = data["dst_node"]

                 elif "-" in destRouterLink:
                     # Spliting and storing destRouterLink data in tempList
@@ -232,39 +234,55 @@ def build_topo_from_json(tgen, topo):

                 if destRouter in listAllRouters:

-                    topo['routers'][destRouter]['links'][curSwitch] = \
-                        deepcopy(topo['switches'][curSwitch]['links'][destRouterLink])
+                    topo["routers"][destRouter]["links"][curSwitch] = deepcopy(
+                        topo["switches"][curSwitch]["links"][destRouterLink]
+                    )

                     # Assigning name to interfaces
-                    topo['routers'][destRouter]['links'][curSwitch]['interface'] = \
-                        '{}-{}-eth{}'.format(destRouter, curSwitch, topo['routers'] \
-                        [destRouter]['nextIfname'])
+                    topo["routers"][destRouter]["links"][curSwitch][
+                        "interface"
+                    ] = "{}-{}-eth{}".format(
+                        destRouter, curSwitch, topo["routers"][destRouter]["nextIfname"]
+                    )

-                    topo['switches'][curSwitch]['links'][destRouter]['interface'] = \
-                        '{}-{}-eth{}'.format(curSwitch, destRouter, topo['routers'] \
-                        [destRouter]['nextIfname'])
+                    topo["switches"][curSwitch]["links"][destRouter][
+                        "interface"
+                    ] = "{}-{}-eth{}".format(
+                        curSwitch, destRouter, topo["routers"][destRouter]["nextIfname"]
+                    )

-                    topo['routers'][destRouter]['nextIfname'] += 1
+                    topo["routers"][destRouter]["nextIfname"] += 1

                     # Add links
-                    dictSwitches[curSwitch].add_link(tgen.gears[destRouter], \
-                        topo['switches'][curSwitch]['links'][destRouter]['interface'],
-                        topo['routers'][destRouter]['links'][curSwitch]['interface'],
+                    dictSwitches[curSwitch].add_link(
+                        tgen.gears[destRouter],
+                        topo["switches"][curSwitch]["links"][destRouter]["interface"],
+                        topo["routers"][destRouter]["links"][curSwitch]["interface"],
                     )

                     # IPv4
-                    if 'ipv4' in topo['routers'][destRouter]['links'][curSwitch]:
-                        if topo['routers'][destRouter]['links'][curSwitch]['ipv4'] == 'auto':
-                            topo['routers'][destRouter]['links'][curSwitch]['ipv4'] = \
-                                '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \
-                                'v4mask'])
+                    if "ipv4" in topo["routers"][destRouter]["links"][curSwitch]:
+                        if (
+                            topo["routers"][destRouter]["links"][curSwitch]["ipv4"]
+                            == "auto"
+                        ):
+                            topo["routers"][destRouter]["links"][curSwitch][
+                                "ipv4"
+                            ] = "{}/{}".format(
+                                ipv4Next, topo["link_ip_start"]["v4mask"]
+                            )
                             ipv4Next += 1
                     # IPv6
-                    if 'ipv6' in topo['routers'][destRouter]['links'][curSwitch]:
-                        if topo['routers'][destRouter]['links'][curSwitch]['ipv6'] == 'auto':
-                            topo['routers'][destRouter]['links'][curSwitch]['ipv6'] = \
-                                '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \
-                                'v6mask'])
+                    if "ipv6" in topo["routers"][destRouter]["links"][curSwitch]:
+                        if (
+                            topo["routers"][destRouter]["links"][curSwitch]["ipv6"]
+                            == "auto"
+                        ):
+                            topo["routers"][destRouter]["links"][curSwitch][
+                                "ipv6"
+                            ] = "{}/{}".format(
+                                ipv6Next, topo["link_ip_start"]["v6mask"]
+                            )
                             ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step)

     logger.debug(
@@ -294,7 +312,7 @@ def build_config_from_json(tgen, topo, save_bkup=True):
             ("bgp_community_list", create_bgp_community_lists),
             ("route_maps", create_route_maps),
             ("bgp", create_router_bgp),
-            ("ospf", create_router_ospf)
+            ("ospf", create_router_ospf),
         ]
     )

@@ -51,8 +51,9 @@ from mininet.log import setLogLevel, info
 from mininet.cli import CLI
 from mininet.link import Intf

+
 def gdb_core(obj, daemon, corefiles):
-    gdbcmds = '''
+    gdbcmds = """
     info threads
     bt full
     disassemble
@@ -66,21 +67,21 @@ def gdb_core(obj, daemon, corefiles):
     disassemble
     up
     disassemble
-    '''
-    gdbcmds = [['-ex', i.strip()] for i in gdbcmds.strip().split('\n')]
+    """
+    gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")]
     gdbcmds = [item for sl in gdbcmds for item in sl]

     daemon_path = os.path.join(obj.daemondir, daemon)
     backtrace = subprocess.check_output(
-        ['gdb', daemon_path, corefiles[0], '--batch'] + gdbcmds
+        ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds
     )
     sys.stderr.write(
-        "\n%s: %s crashed. Core file found - Backtrace follows:\n"
-        % (obj.name, daemon)
+        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
     )
     sys.stderr.write("%s" % backtrace)
     return backtrace


 class json_cmp_result(object):
     "json_cmp result class for better assertion messages"

@@ -739,7 +740,8 @@ def ip4_vrf_route(node):
     }
     """
     output = normalize_text(
-        node.run("ip route show vrf {0}-cust1".format(node.name))).splitlines()
+        node.run("ip route show vrf {0}-cust1".format(node.name))
+    ).splitlines()

     result = {}
     for line in output:
@@ -821,7 +823,8 @@ def ip6_vrf_route(node):
     }
     """
     output = normalize_text(
-        node.run("ip -6 route show vrf {0}-cust1".format(node.name))).splitlines()
+        node.run("ip -6 route show vrf {0}-cust1".format(node.name))
+    ).splitlines()
     result = {}
     for line in output:
         columns = line.split(" ")
@@ -992,7 +995,7 @@ class Router(Node):
         # Backward compatibility:
         # Load configuration defaults like topogen.
         self.config_defaults = configparser.ConfigParser(
-            defaults = {
+            defaults={
                 "verbosity": "info",
                 "frrdir": "/usr/lib/frr",
                 "routertype": "frr",
@@ -1095,7 +1098,7 @@ class Router(Node):
         if re.search(r"No such file or directory", rundaemons):
             return 0
         if rundaemons is not None:
-            bet = rundaemons.split('\n')
+            bet = rundaemons.split("\n")
             for d in bet[:-1]:
                 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
@@ -1110,24 +1113,28 @@ class Router(Node):
         if re.search(r"No such file or directory", rundaemons):
             return errors
         if rundaemons is not None:
-            dmns = rundaemons.split('\n')
+            dmns = rundaemons.split("\n")
             # Exclude empty string at end of list
             for d in dmns[:-1]:
                 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                     daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0])
-                    logger.info(
-                        "{}: stopping {}".format(
-                            self.name, daemonname
-                        )
-                    )
+                    logger.info("{}: stopping {}".format(self.name, daemonname))
                     try:
                         os.kill(int(daemonpid), signal.SIGTERM)
                     except OSError as err:
                         if err.errno == errno.ESRCH:
-                            logger.error("{}: {} left a dead pidfile (pid={})".format(self.name, daemonname, daemonpid))
+                            logger.error(
+                                "{}: {} left a dead pidfile (pid={})".format(
+                                    self.name, daemonname, daemonpid
+                                )
+                            )
                         else:
-                            logger.info("{}: {} could not kill pid {}: {}".format(self.name, daemonname, daemonpid, str(err)))
+                            logger.info(
+                                "{}: {} could not kill pid {}: {}".format(
+                                    self.name, daemonname, daemonpid, str(err)
+                                )
+                            )

         if not wait:
             return errors
@@ -1135,18 +1142,28 @@ class Router(Node):
         running = self.listDaemons()

         if running:
-            sleep(0.1, "{}: waiting for daemons stopping: {}".format(self.name, ', '.join(running)))
+            sleep(
+                0.1,
+                "{}: waiting for daemons stopping: {}".format(
+                    self.name, ", ".join(running)
+                ),
+            )
             running = self.listDaemons()

             counter = 20
             while counter > 0 and running:
-                sleep(0.5, "{}: waiting for daemons stopping: {}".format(self.name, ', '.join(running)))
+                sleep(
+                    0.5,
+                    "{}: waiting for daemons stopping: {}".format(
+                        self.name, ", ".join(running)
+                    ),
+                )
                 running = self.listDaemons()
                 counter -= 1

         if running:
             # 2nd round of kill if daemons didn't exit
-            dmns = rundaemons.split('\n')
+            dmns = rundaemons.split("\n")
             # Exclude empty string at end of list
             for d in dmns[:-1]:
                 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
@@ -1295,11 +1312,12 @@ class Router(Node):
     def startRouterDaemons(self, daemons=None):
         "Starts all FRR daemons for this router."

-        bundle_data = ''
+        bundle_data = ""

-        if os.path.exists('/etc/frr/support_bundle_commands.conf'):
+        if os.path.exists("/etc/frr/support_bundle_commands.conf"):
             bundle_data = subprocess.check_output(
-                ["cat /etc/frr/support_bundle_commands.conf"], shell=True)
+                ["cat /etc/frr/support_bundle_commands.conf"], shell=True
+            )
         self.cmd(
             "echo '{}' > /etc/frr/support_bundle_commands.conf".format(bundle_data)
         )
@@ -1400,7 +1418,7 @@ class Router(Node):
         for daemon in daemons:
             if rundaemons is not None and daemon in rundaemons:
                 numRunning = 0
-                dmns = rundaemons.split('\n')
+                dmns = rundaemons.split("\n")
                 # Exclude empty string at end of list
                 for d in dmns[:-1]:
                     if re.search(r"%s" % daemon, d):
@@ -1738,8 +1756,9 @@ class LegacySwitch(OVSSwitch):
         OVSSwitch.__init__(self, name, failMode="standalone", **params)
         self.switchIP = None

+
 def frr_unicode(s):
-    '''Convert string to unicode, depending on python version'''
+    """Convert string to unicode, depending on python version"""
     if sys.version_info[0] > 2:
         return s
     else:
@@ -65,22 +65,22 @@ class OspfSrTopo(Topo):
             tgen.add_router("r{}".format(routern))

         # Interconect router 1 and 2 with 2 links
-        switch = tgen.add_switch('s1')
-        switch.add_link(tgen.gears['r1'])
-        switch.add_link(tgen.gears['r2'])
-        switch = tgen.add_switch('s2')
-        switch.add_link(tgen.gears['r1'])
-        switch.add_link(tgen.gears['r2'])
+        switch = tgen.add_switch("s1")
+        switch.add_link(tgen.gears["r1"])
+        switch.add_link(tgen.gears["r2"])
+        switch = tgen.add_switch("s2")
+        switch.add_link(tgen.gears["r1"])
+        switch.add_link(tgen.gears["r2"])

         # Interconect router 3 and 2
-        switch = tgen.add_switch('s3')
-        switch.add_link(tgen.gears['r3'])
-        switch.add_link(tgen.gears['r2'])
+        switch = tgen.add_switch("s3")
+        switch.add_link(tgen.gears["r3"])
+        switch.add_link(tgen.gears["r2"])

         # Interconect router 4 and 2
-        switch = tgen.add_switch('s4')
-        switch.add_link(tgen.gears['r4'])
-        switch.add_link(tgen.gears['r2'])
+        switch = tgen.add_switch("s4")
+        switch.add_link(tgen.gears["r4"])
+        switch.add_link(tgen.gears["r2"])


 def setup_module(mod):
@@ -134,12 +134,13 @@ def test_ospf_sr():
         # Run test function until we get an result. Wait at most 60 seconds.
         rt = tgen.gears[router]
         test_func = partial(
-            topotest.router_json_cmp, rt, 'show ip ospf database segment-routing json', expected
+            topotest.router_json_cmp,
+            rt,
+            "show ip ospf database segment-routing json",
+            expected,
         )
         rv, diff = topotest.run_and_expect(test_func, None, count=25, wait=3)
-        assert rv, "OSPF did not start Segment Routing on {}:\n{}".format(
-            router, diff
-        )
+        assert rv, "OSPF did not start Segment Routing on {}:\n{}".format(router, diff)


 def test_ospf_kernel_route():
@@ -169,7 +170,7 @@ def test_ospf_kernel_route():
         }
     ]
     """
-    out = rt.vtysh_cmd('show mpls table json', isjson=True)
+    out = rt.vtysh_cmd("show mpls table json", isjson=True)

     outlist = []
     for key in out.keys():
@@ -35,7 +35,7 @@ import json

 # Save the Current Working Directory to find configuration files.
 CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))

 # pylint: disable=C0413
 # Import topogen and topotest helpers
@@ -46,28 +46,30 @@ from lib.topolog import logger
 # Required to instantiate the topology builder class.
 from mininet.topo import Topo

+
 class OSPFTopo(Topo):
     "Test topology builder"

     def build(self, *_args, **_opts):
         "Build function"
         tgen = get_topogen(self)

         # Create 4 routers
         for routern in range(1, 3):
-            tgen.add_router('r{}'.format(routern))
+            tgen.add_router("r{}".format(routern))

         # Create a empty network for router 1
-        switch = tgen.add_switch('s1')
-        switch.add_link(tgen.gears['r1'])
+        switch = tgen.add_switch("s1")
+        switch.add_link(tgen.gears["r1"])

         # Create a empty network for router 2
-        switch = tgen.add_switch('s2')
-        switch.add_link(tgen.gears['r2'])
+        switch = tgen.add_switch("s2")
+        switch.add_link(tgen.gears["r2"])

         # Interconect router 1, 2
-        switch = tgen.add_switch('s3')
-        switch.add_link(tgen.gears['r1'])
-        switch.add_link(tgen.gears['r2'])
+        switch = tgen.add_switch("s3")
+        switch.add_link(tgen.gears["r1"])
+        switch.add_link(tgen.gears["r2"])

+
 def setup_module(mod):
@@ -78,12 +80,10 @@ def setup_module(mod):
     router_list = tgen.routers()
     for rname, router in router_list.items():
         router.load_config(
-            TopoRouter.RD_ZEBRA,
-            os.path.join(CWD, '{}/zebra.conf'.format(rname))
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
         )
         router.load_config(
-            TopoRouter.RD_OSPF,
-            os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+            TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
         )

     # What is this? OSPF Unnumbered depends on the rp_filter
@@ -93,18 +93,15 @@ def setup_module(mod):
     # the rp_filter. Setting it to '0' allows the OS to pass
     # up the mcast packet not destined for the local routers
     # network.
-    topotest.set_sysctl(tgen.net['r1'],
-                        'net.ipv4.conf.r1-eth1.rp_filter', 0)
-    topotest.set_sysctl(tgen.net['r1'],
-                        'net.ipv4.conf.all.rp_filter', 0)
-    topotest.set_sysctl(tgen.net['r2'],
-                        'net.ipv4.conf.r2-eth1.rp_filter', 0)
-    topotest.set_sysctl(tgen.net['r2'],
-                        'net.ipv4.conf.all.rp_filter', 0)
+    topotest.set_sysctl(tgen.net["r1"], "net.ipv4.conf.r1-eth1.rp_filter", 0)
+    topotest.set_sysctl(tgen.net["r1"], "net.ipv4.conf.all.rp_filter", 0)
+    topotest.set_sysctl(tgen.net["r2"], "net.ipv4.conf.r2-eth1.rp_filter", 0)
+    topotest.set_sysctl(tgen.net["r2"], "net.ipv4.conf.all.rp_filter", 0)

     # Initialize all routers.
     tgen.start_router()
-    #tgen.mininet_cli()
+    # tgen.mininet_cli()


 def teardown_module(mod):
     "Teardown the pytest environment"
@@ -116,50 +113,54 @@ def test_ospf_convergence():
     "Test OSPF daemon convergence and that we have received the ospf routes"
     tgen = get_topogen()
     if tgen.routers_have_failure():
-        pytest.skip('skipped because of router(s) failure')
+        pytest.skip("skipped because of router(s) failure")

     for router, rnode in tgen.routers().items():
         logger.info('Waiting for router "%s" convergence', router)

-        json_file = '{}/{}/ospf-route.json'.format(CWD, router)
+        json_file = "{}/{}/ospf-route.json".format(CWD, router)
         expected = json.loads(open(json_file).read())

-        test_func = partial(topotest.router_json_cmp,
-                            rnode, 'show ip ospf route json', expected)
+        test_func = partial(
+            topotest.router_json_cmp, rnode, "show ip ospf route json", expected
+        )
         _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
         assertmsg = '"{}" JSON output mismatches'.format(router)
         assert result is None, assertmsg
-    #tgen.mininet_cli()
+    # tgen.mininet_cli()


 def test_ospf_kernel_route():
     "Test OSPF kernel route installation and we have the onlink success"
     tgen = get_topogen()
     if tgen.routers_have_failure():
-        pytest.skip('skipped because of router(s) failure')
+        pytest.skip("skipped because of router(s) failure")

     rlist = tgen.routers().values()
     for router in rlist:
         logger.info('Checking OSPF IPv4 kernel routes in "%s"', router.name)

-        json_file = '{}/{}/v4_route.json'.format(CWD, router.name)
+        json_file = "{}/{}/v4_route.json".format(CWD, router.name)
         expected = json.loads(open(json_file).read())

-        test_func = partial(topotest.router_json_cmp,
-                            router, 'show ip route json', expected)
-        _, result = topotest.run_and_expect(test_func, None, count=10, wait=.5)
+        test_func = partial(
+            topotest.router_json_cmp, router, "show ip route json", expected
+        )
+        _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
         assertmsg = '"{}" JSON output mistmatches'.format(router)
         assert result is None, assertmsg
-    #tgen.mininet_cli()
+    # tgen.mininet_cli()


 def test_memory_leak():
     "Run the memory leak test and report results."
     tgen = get_topogen()
     if not tgen.is_memleak_enabled():
-        pytest.skip('Memory leak test/report is disabled')
+        pytest.skip("Memory leak test/report is disabled")

     tgen.report_memory_leaks()

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     args = ["-s"] + sys.argv[1:]
     sys.exit(pytest.main(args))
@@ -48,7 +48,7 @@ from lib.common_config import (
     reset_config_on_routers,
     step,
     shutdown_bringup_interface,
-    topo_daemons
+    topo_daemons,
 )
 from lib.topolog import logger
 from lib.topojson import build_topo_from_json, build_config_from_json
@@ -53,7 +53,7 @@ from lib.common_config import (
     create_route_maps,
     shutdown_bringup_interface,
     create_interfaces_cfg,
-    topo_daemons
+    topo_daemons,
 )
 from lib.topolog import logger

@@ -53,7 +53,7 @@ from lib.common_config import (
     shutdown_bringup_interface,
     stop_router,
     start_router,
-    topo_daemons
+    topo_daemons,
 )
 from lib.bgp import verify_bgp_convergence, create_router_bgp
 from lib.topolog import logger
@@ -55,7 +55,7 @@ from lib.common_config import (
     shutdown_bringup_interface,
     stop_router,
     start_router,
-    topo_daemons
+    topo_daemons,
 )
 from lib.bgp import verify_bgp_convergence, create_router_bgp
 from lib.topolog import logger
@@ -44,7 +44,7 @@ from lib.common_config import (
     create_route_maps,
     shutdown_bringup_interface,
     create_interfaces_cfg,
-    topo_daemons
+    topo_daemons,
 )
 from ipaddress import IPv4Address
 from lib.topogen import Topogen, get_topogen
@@ -52,7 +52,7 @@ from lib.common_config import (
     step,
     create_route_maps,
     verify_prefix_lists,
-    topo_daemons
+    topo_daemons,
 )
 from lib.topolog import logger
 from lib.topojson import build_topo_from_json, build_config_from_json
@@ -50,7 +50,7 @@ from lib.common_config import (
     create_static_routes,
     step,
     shutdown_bringup_interface,
-    topo_daemons
+    topo_daemons,
 )
 from lib.bgp import verify_bgp_convergence, create_router_bgp
 from lib.topolog import logger
@@ -278,8 +278,7 @@ def test_ospf_redistribution_tc5_p0(request):

     dut = "r1"
     for num in range(0, nretry):
-        result = verify_ospf_rib(
-            tgen, dut, input_dict, next_hop=nh, expected=False)
+        result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False)
         if result is not True:
             break

@@ -399,8 +398,7 @@ def test_ospf_redistribution_tc6_p0(request):

     dut = "r1"
     for num in range(0, nretry):
-        result = verify_ospf_rib(
-            tgen, dut, input_dict, next_hop=nh, expected=False)
+        result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False)
         if result is not True:
             break
     assert result is not True, "Testcase {} : Failed \n Error: {}".format(
@@ -409,13 +407,7 @@ def test_ospf_redistribution_tc6_p0(request):

     protocol = "ospf"
     result = verify_rib(
-        tgen,
-        "ipv4",
-        dut,
-        input_dict,
-        protocol=protocol,
-        next_hop=nh,
-        expected=False,
+        tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh, expected=False,
     )
     assert result is not True, "Testcase {} : Failed \n Error: {}".format(
         tc_name, result
@@ -53,7 +53,7 @@ from lib.common_config import (
     create_route_maps,
     shutdown_bringup_interface,
     create_interfaces_cfg,
-    topo_daemons
+    topo_daemons,
 )
 from lib.topolog import logger
 from lib.topojson import build_topo_from_json, build_config_from_json
@@ -147,7 +147,9 @@ def test_pbr_data():
         expected = json.loads(open(intf_file).read())

         # Actual output from router
-        test_func = partial(topotest.router_json_cmp, router, "show pbr interface json", expected)
+        test_func = partial(
+            topotest.router_json_cmp, router, "show pbr interface json", expected
+        )
         _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
         assertmsg = '"show pbr interface" mismatches on {}'.format(router.name)
         if result is not None:
@@ -161,7 +163,9 @@ def test_pbr_data():
         expected = json.loads(open(map_file).read())

         # Actual output from router
-        test_func = partial(topotest.router_json_cmp, router, "show pbr map json", expected)
+        test_func = partial(
+            topotest.router_json_cmp, router, "show pbr map json", expected
+        )
         _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
         assertmsg = '"show pbr map" mismatches on {}'.format(router.name)
         if result is not None:
@@ -175,13 +179,16 @@ def test_pbr_data():
         expected = json.loads(open(nexthop_file).read())

         # Actual output from router
-        test_func = partial(topotest.router_json_cmp, router, "show pbr nexthop-groups json", expected)
+        test_func = partial(
+            topotest.router_json_cmp, router, "show pbr nexthop-groups json", expected
+        )
         _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
         assertmsg = '"show pbr nexthop-groups" mismatches on {}'.format(router.name)
         if result is not None:
             gather_pbr_data_on_error(router)
         assert result is None, assertmsg


 def test_pbr_flap():
     "Test PBR interface flapping"

@@ -212,7 +219,9 @@ def test_pbr_flap():
         expected = json.loads(open(intf_file).read())

         # Actual output from router
-        test_func = partial(topotest.router_json_cmp, router, "show pbr interface json", expected)
+        test_func = partial(
+            topotest.router_json_cmp, router, "show pbr interface json", expected
+        )
         _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
         assertmsg = '"show pbr interface" mismatches on {}'.format(router.name)
         if result is not None:
@@ -274,4 +283,3 @@ def gather_pbr_data_on_error(router):
     logger.info(router.run("ip route show table 10005"))
     logger.info(router.run("ip -6 route show table 10005"))
     logger.info(router.run("ip rule show"))
-
Some files were not shown because too many files have changed in this diff