author     Arturo Borrero Gonzalez <arturo@netfilter.org>  2021-01-20 20:29:00 +0100
committer  Arturo Borrero Gonzalez <arturo@netfilter.org>  2021-02-01 11:31:48 +0100
commit     2582f19b47641f7e471728710e6c1e34c2369faa (patch)
tree       aca87393922426374f13245b0f7b830cd78743c5 /tests
parent     089aa91d6a4a382775d6077c842492ed3d54be60 (diff)
tests: introduce new python-based framework for running tests
This test suite should help us develop better tests for conntrack-tools in
general and conntrackd in particular.

The framework is composed of a runner script, written in python3, and 3 yaml
files for configuration and testcase definition:

 - scenarios.yaml: contains information on network scenarios for tests to use
 - tests.yaml: contains testcase definitions
 - env.yaml: contains default values for environment variables

The test cases can be anything, from a simple command to a call to an
external script that performs more complex operations. See follow-up patches
to learn more about how this works. The plan is for this framework to
replace, or call, the other test suites in this tree.

The runner script is rather simple, and it should be more or less
straightforward to use. On Debian machines, it requires the python3-yaml
package to be installed as a dependency.

For reference, here are the script options:

=== 8< ===
$ tests/conntrackd/conntrackd-tests.py --help
usage: conntrackd-tests.py [-h] [--tests-file TESTS_FILE]
                           [--scenarios-file SCENARIOS_FILE]
                           [--env-file ENV_FILE] [--single SINGLE]
                           [--start-scenario START_SCENARIO]
                           [--stop-scenario STOP_SCENARIO] [--debug]

Utility to run tests for conntrack-tools

optional arguments:
  -h, --help            show this help message and exit
  --tests-file TESTS_FILE
                        File with testcase definitions. Defaults to
                        'tests.yaml'
  --scenarios-file SCENARIOS_FILE
                        File with configuration scenarios for tests. Defaults
                        to 'scenarios.yaml'
  --env-file ENV_FILE   File with environment variables for scenarios/tests.
                        Defaults to 'env.yaml'
  --single SINGLE       Execute a single testcase and exit. Use this for
                        developing testcases
  --start-scenario START_SCENARIO
                        Execute scenario start commands and exit. Use this
                        for developing testcases
  --stop-scenario STOP_SCENARIO
                        Execute scenario stop commands and exit. Use this for
                        cleanup
  --debug               debug mode
=== 8< ===

To run it, simply use:

=== 8< ===
$ cd tests/conntrackd/ ; sudo ./conntrackd-tests.py
[..]
=== 8< ===

Signed-off-by: Arturo Borrero Gonzalez <arturo@netfilter.org>
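For illustration, a minimal trio of configuration files matching the formats
documented in the script could look like this (the scenario name, commands
and variable are hypothetical examples, not files shipped by this patch):

=== 8< ===
$ cat scenarios.yaml
- name: netns1
  start:
    - ip netns add $NETNS
  stop:
    - ip netns del $NETNS

$ cat tests.yaml
- name: "loopback exists in netns"
  scenario: netns1
  test:
    - ip netns exec $NETNS ip link show lo

$ cat env.yaml
- NETNS: ct-test-ns
=== 8< ===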
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/conntrackd/conntrackd-tests.py  263
1 file changed, 263 insertions, 0 deletions
diff --git a/tests/conntrackd/conntrackd-tests.py b/tests/conntrackd/conntrackd-tests.py
new file mode 100755
index 0000000..f760351
--- /dev/null
+++ b/tests/conntrackd/conntrackd-tests.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python3
+
+# (C) 2021 by Arturo Borrero Gonzalez <arturo@netfilter.org>
+
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+
+# tests.yaml file format:
+# - name: "test 1"
+#   scenario: scenario1
+#   test:
+#     - test1 cmd1
+#     - test1 cmd2
+
+# scenarios.yaml file format:
+# - name: scenario1
+#   start:
+#     - cmd1
+#     - cmd2
+#   stop:
+#     - cmd1
+#     - cmd2
+
+# env.yaml file format:
+# - VAR1: value1
+# - VAR2: value2
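+
+# note: variables already set in the calling environment take precedence over
+# the defaults from env.yaml, and scenario/test commands can reference them
+# as $VAR because commands are executed through a shell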
+
+import os
+import sys
+import argparse
+import subprocess
+import yaml
+import logging
+
+
+def read_yaml_file(file):
+    try:
+        with open(file, "r") as stream:
+            try:
+                return yaml.safe_load(stream)
+            except yaml.YAMLError as e:
+                logging.error(e)
+                exit(2)
+    except FileNotFoundError as e:
+        logging.error(e)
+        exit(2)
+
+
+def validate_dictionary(dictionary, keys):
+    if not isinstance(dictionary, dict):
+        logging.error("not a dictionary:\n{}".format(dictionary))
+        return False
+    for key in keys:
+        if dictionary.get(key) is None:
+            logging.error("missing key {} in dictionary:\n{}".format(key, dictionary))
+            return False
+    return True
+
+
+def stage_validate_config(args):
+    scenarios_dict = read_yaml_file(args.scenarios_file)
+    for definition in scenarios_dict:
+        if not validate_dictionary(definition, ["name", "start", "stop"]):
+            logging.error("couldn't validate file {}".format(args.scenarios_file))
+            return False
+
+    logging.debug("{} seems valid".format(args.scenarios_file))
+    ctx.scenarios_dict = scenarios_dict
+
+    tests_dict = read_yaml_file(args.tests_file)
+    for definition in tests_dict:
+        if not validate_dictionary(definition, ["name", "scenario", "test"]):
+            logging.error("couldn't validate file {}".format(args.tests_file))
+            return False
+
+    logging.debug("{} seems valid".format(args.tests_file))
+    ctx.tests_dict = tests_dict
+
+    env_list = read_yaml_file(args.env_file)
+    if not isinstance(env_list, list):
+        logging.error("couldn't validate file {}".format(args.env_file))
+        return False
+
+    # apply env.yaml defaults: values already set in the calling environment
+    # take precedence over the values defined in the file
+    for entry in env_list:
+        for key in entry:
+            os.environ[key] = os.getenv(key, entry[key])
+
+    return True
+
+
+def cmd_run(cmd):
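+    # commands run through a shell (shell=True), so testcases can use pipes,
+    # redirections and $VAR references to environment variables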
+    logging.debug("running command: {}".format(cmd))
+    r = subprocess.run(cmd, shell=True)
+    if r.returncode != 0:
+        logging.warning("failed command: {}".format(cmd))
+    return r.returncode
+
+
+def scenario_get(name):
+    for scenario in ctx.scenarios_dict:
+        if scenario["name"] == name:
+            return scenario
+
+    logging.error("couldn't find a definition for scenario '{}'".format(name))
+    exit(1)
+
+
+def scenario_start(scenario):
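+    # run every start command; on the first failure, record it and flag the
+    # current test to be skipped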
+    for cmd in scenario["start"]:
+        if cmd_run(cmd) == 0:
+            continue
+
+        logging.warning("--- failed scenario: {}".format(scenario["name"]))
+        ctx.counter_scenario_failed += 1
+        ctx.skip_current_test = True
+        return
+
+
+def scenario_stop(scenario):
+    for cmd in scenario["stop"]:
+        cmd_run(cmd)
+
+
+def test_get(name):
+    for test in ctx.tests_dict:
+        if test["name"] == name:
+            return test
+
+    logging.error("couldn't find a definition for test '{}'".format(name))
+    exit(1)
+
+
+def _test_run(test_definition):
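+    # scenario_start() flags the test to be skipped when the scenario
+    # failed to start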
+    if ctx.skip_current_test:
+        return
+
+    for cmd in test_definition["test"]:
+        if cmd_run(cmd) == 0:
+            continue
+
+        logging.warning("--- failed test: {}".format(test_definition["name"]))
+        ctx.counter_test_failed += 1
+        return
+
+    logging.info("--- passed test: {}".format(test_definition["name"]))
+    ctx.counter_test_ok += 1
+
+
+def test_run(test_definition):
+    scenario = scenario_get(test_definition["scenario"])
+
+    logging.info("--- running test: {}".format(test_definition["name"]))
+
+    scenario_start(scenario)
+    _test_run(test_definition)
+    scenario_stop(scenario)
+
+
+def stage_run_tests(args):
+    if args.start_scenario:
+        scenario_start(scenario_get(args.start_scenario))
+        return
+
+    if args.stop_scenario:
+        scenario_stop(scenario_get(args.stop_scenario))
+        return
+
+    if args.single:
+        test_run(test_get(args.single))
+        return
+
+    for test_definition in ctx.tests_dict:
+        ctx.skip_current_test = False
+        test_run(test_definition)
+
+
+def stage_report():
+    logging.info("---")
+    logging.info("--- finished")
+    total = ctx.counter_test_ok + ctx.counter_test_failed + ctx.counter_scenario_failed
+    logging.info("--- passed tests: {}".format(ctx.counter_test_ok))
+    logging.info("--- failed tests: {}".format(ctx.counter_test_failed))
+    logging.info("--- scenario failures: {}".format(ctx.counter_scenario_failed))
+    logging.info("--- total tests: {}".format(total))
+
+
+def parse_args():
+    description = "Utility to run tests for conntrack-tools"
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument(
+        "--tests-file",
+        default="tests.yaml",
+        help="File with testcase definitions. Defaults to '%(default)s'",
+    )
+    parser.add_argument(
+        "--scenarios-file",
+        default="scenarios.yaml",
+        help="File with configuration scenarios for tests. Defaults to '%(default)s'",
+    )
+    parser.add_argument(
+        "--env-file",
+        default="env.yaml",
+        help="File with environment variables for scenarios/tests. Defaults to '%(default)s'",
+    )
+    parser.add_argument(
+        "--single",
+        help="Execute a single testcase and exit. Use this for developing testcases",
+    )
+    parser.add_argument(
+        "--start-scenario",
+        help="Execute scenario start commands and exit. Use this for developing testcases",
+    )
+    parser.add_argument(
+        "--stop-scenario",
+        help="Execute scenario stop commands and exit. Use this for cleanup",
+    )
+    parser.add_argument(
+        "--debug",
+        action="store_true",
+        help="debug mode",
+    )
+
+    return parser.parse_args()
+
+
+class Context:
+    def __init__(self):
+        self.scenarios_dict = None
+        self.tests_dict = None
+        self.counter_test_failed = 0
+        self.counter_test_ok = 0
+        self.counter_scenario_failed = 0
+        self.skip_current_test = False
+
+
+# global data
+ctx = Context()
+
+
+def main():
+    args = parse_args()
+
+    logging_format = "[%(filename)s] %(levelname)s: %(message)s"
+    if args.debug:
+        logging_level = logging.DEBUG
+    else:
+        logging_level = logging.INFO
+    logging.basicConfig(format=logging_format, level=logging_level, stream=sys.stdout)
+
+    if os.geteuid() != 0:
+        logging.error("root required")
+        exit(1)
+
+    # stop early if the yaml configuration files fail validation
+    if not stage_validate_config(args):
+        exit(1)
+
+    stage_run_tests(args)
+    stage_report()
+
+
+if __name__ == "__main__":
+    main()
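For testcase development, a plausible workflow using the one-shot options
(with the hypothetical names from the example files above):

=== 8< ===
$ cd tests/conntrackd/

# run a single testcase, including its scenario start/stop commands
$ sudo ./conntrackd-tests.py --single "loopback exists in netns"

# or bring a scenario up on its own, poke around, then clean up
$ sudo ./conntrackd-tests.py --start-scenario netns1
$ sudo ./conntrackd-tests.py --stop-scenario netns1
=== 8< ===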