diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 8df631ae..00000000 --- a/.flake8 +++ /dev/null @@ -1,32 +0,0 @@ -# -*- conf -*- - -[flake8] -# Use black's default line length of 88, rather than using the -# flake8's default line length of 79. -max-line-length = 88 - -ignore = - # E203 is Whitespace before ':': black adds whitespace before and - # after ':'. - E203, - # E501 Line too long: ignore lines black couldn't/didn't handle. - E501, - # W503 Line break before binary operator: per flake8, line break - # after is a violation (which black added), but per flake8, W503 - # and W504 mutually contradicts. - # https://www.flake8rules.com/rules/W503.html - # https://www.flake8rules.com/rules/W504.html - W503, - -statistics = True -exclude = - .git, - .eggs, - __pycache__, - .tox/ - venv/ - doc/, - docs/, - build/, - dist/, - archive/ diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 705d3ca6..2737925b 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -18,24 +18,24 @@ jobs: steps: - name: Check out sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python 3.9 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.9" cache: 'pip' # cache pip dependencies - cache-dependency-path: setup.cfg + cache-dependency-path: pyproject.toml - - name: Install dependencies + - name: Install tools run: | python -m pip install --upgrade pip - python -m pip install black isort + python -m pip install .[lint] - name: Run "black --check" run: | - python -m black --check . + python -m black --check $(git ls-files "*.py") - name: Run "isort --check" run: | - python -m isort --check --profile black . + python -m isort --check $(git ls-files "*.py") diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dc9b3381..743b6d86 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,9 +7,11 @@ name: Test on: push: - branches: [ "main", "develop" ] + branches: + - main pull_request: - branches: [ "main", "develop" ] + branches: + - main permissions: contents: read @@ -31,69 +33,50 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 # See https://github.com/marketplace/actions/setup-python - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies - cache-dependency-path: setup.cfg + cache-dependency-path: pyproject.toml - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 pep8-naming wheel coveralls - if [ -f requirements.txt ]; then pip install --prefer-binary -r requirements.txt; fi - - # See https://docs.github.com/en/enterprise-cloud@latest/actions/using-github-hosted-runners/customizing-github-hosted-runners - name: Install libgraphviz-dev run: | sudo apt-get update sudo apt-get -y install libgraphviz-dev - - name: Run some basic checks with flake8 - run: | - # Stop the build if there are Python syntax errors or undefined names. - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - - - name: Check against PEP8 naming conventions + - name: Install dependencies run: | - # This needs pep8-naming, installed above - flake8 . 
--count --select=N8 --show-source --statistics + python -m pip install --upgrade pip + pip install .[test,pygraphviz,lint] - - name: Run complexity checks with flake8 - run: | - # Note that `--exit-zero` treats all errors as warnings. - # Code that is too complex is not considered an error, for - # now. - flake8 . --count --exit-zero --max-complexity=10 + - name: Lint with ruff + run: ruff check $(git ls-files "*.py") - name: Run tests run: | - # Install package so that tests can be run. - pip install .[test,pygraphviz] # Run tests and collect coverage data. python -m pytest + # Generate LCOV format coverage data for coveralls. + python -m coverage lcov -o coverage.lcov - name: Send coverage data to coveralls.io - run: | - python -m coveralls --service=github - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_FLAG_NAME: ${{ matrix.python-version }} - COVERALLS_PARALLEL: true + uses: coverallsapp/github-action@v2 + with: + flag-name: run-${{ join(matrix.*, '-') }} + file: coverage.lcov + parallel: true finalize: name: finalize needs: test - runs-on: ubuntu-latest - container: python:3-slim + runs-on: ubuntu-latest + if: ${{ always() }} steps: - name: Indicate completion to coveralls.io - run: | - pip --no-cache-dir install --upgrade coveralls - python -m coveralls --service=github --finish - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: coverallsapp/github-action@v2 + with: + parallel-finished: true diff --git a/.gitignore b/.gitignore index eac45aa3..30a1d2b7 100644 --- a/.gitignore +++ b/.gitignore @@ -142,4 +142,4 @@ tests/data/topologies/amlight.png /coverage.xml # Generated by setuptools_scm -/src/sdx/pce/_version.py +/src/sdx_pce/_version.py diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..280ae582 --- /dev/null +++ b/.mailmap @@ -0,0 +1,2 @@ +Yifei Wang <43050198+yifei666@users.noreply.github.com> +Sajith Sasidharan diff --git a/README.md b/README.md index 054fd86a..5d8c9eb8 100644 --- a/README.md +++ b/README.md @@ -19,17 +19,21 @@ PCE's API is still evolving. With that caveat, and omitting some details, the general usage is like this: ```python -from sdx.pce.load_balancing.te_solver import TESolver -from sdx.pce.topology.temanager import TEManager +from sdx_pce.load_balancing.te_solver import TESolver +from sdx_pce.topology.temanager import TEManager -temanager = TEManager(initial_topology, connection_request) +temanager = TEManager(initial_topology) for topology in topologies: temanager.add_topology(topology) graph = temanager.generate_graph_te() -traffic_matrix = temanager.generate_connection_te() +traffic_matrix = temanager.generate_traffic_matrix(connection_request) solution = TESolver(graph, traffic_matrix).solve() + +breakdown = temanager.generate_connection_breakdown(solution) +for domain, link in breakdown.items(): + # publish(domain, link) ``` Note that PCE requires two inputs: network topology and connection @@ -86,28 +90,52 @@ compiler and development libraries and headers of graphviz installed. 
### Running tests -To run tests, using [tox] is recommended: +Use [pytest] to run all tests: -```console +``` +$ pip install --editable .[test] +$ pytest +``` + +If you want to print console and logging messages when running a test, +do: + +``` +$ pytest --log-cli-level=info [-s|--capture=no] \ + tests/test_te_manager.py::TEManagerTests::test_generate_solver_input +``` + +Use [tox] to run tests using several versions of Python in isolated +virtual environments: + +``` $ tox ``` -With tox, you can run single tests like so: +With tox, you can run a single test verbosely like so: -```console -$ tox -- [-s] tests/test_te_manager.py::TestTEManager::test_generate_solver_input +``` +$ tox -e py311 -- --log-cli-level=info [-s|--capture=no] \ + tests/test_te_manager.py::TEManagerTests::test_generate_solver_input ``` The test that depend on pygraphviz are skipped by default. If you are able to install pygraphviz in your setup, you can run that test too with: -```console +``` $ tox -e extras ``` Test data is stored in [tests/data](./tests/data) as JSON files. +There are also some code checks (ruff, black, and isort) that you can +run with: + +```console +$ tox -e lint +``` + @@ -124,6 +152,11 @@ Test data is stored in [tests/data](./tests/data) as JSON files. [NetworkX]: https://networkx.org/ [OR-Tools]: https://developers.google.com/optimization/ +[pytest]: https://docs.pytest.org/ [tox]: https://tox.wiki/en/latest/index.html [test_request.json]: ./src/sdx/pce/data/requests/test_request.json + +[ruff]: https://pypi.org/project/ruff/ +[black]: https://pypi.org/project/black/ +[isort]: https://pypi.org/project/isort/ diff --git a/pyproject.toml b/pyproject.toml index 730851d3..a5adf262 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ authors = [ { name = "Yifei Wang", email = "ywang13@renci.org" } ] readme = "README.md" -requires-python = ">=3.6" +requires-python = ">=3.8" license = {file = "LICENSE"} classifiers = [ "Programming Language :: Python :: 3", @@ -30,8 +30,7 @@ dependencies = [ "prtpy", "pydot", "dataclasses-json", - "importlib-resources; python_version < '3.9'", - "sdxdatamodel @ git+https://github.com/atlanticwave-sdx/datamodel@2.0.0" + "sdx-datamodel @ git+https://github.com/atlanticwave-sdx/datamodel@2.0.6.rc4", ] [project.urls] @@ -42,16 +41,22 @@ Issues = "https://github.com/atlanticwave-sdx/pce/issues" test = [ "pytest >= 7.1.2", "pytest-cov >= 3.0.0", + "importlib-resources; python_version < '3.9'", ] pygraphviz = [ "pygraphviz" -] +] +lint = [ + "ruff == 0.0.285", + "black == 24.*", + "isort == 5.*", +] [options.packages.find] where = "src" [tool.pytest.ini_options] -addopts = "--cov=sdx.pce --cov-report html --cov-report term-missing" +addopts = "--cov=sdx_pce --cov-report html --cov-report term-missing" testpaths = [ "tests" ] @@ -59,14 +64,11 @@ testpaths = [ [tool.setuptools_scm] # Write version info collected from git to a file. This happens when # we run `python -m build`. -write_to = "src/sdx/pce/_version.py" - -[tool.black] -src_paths = ["src", "tests", "setup.py"] +write_to = "src/sdx_pce/_version.py" [tool.isort] profile = "black" -src_paths = ["src", "tests", "setup.py"] +src_paths = ["src", "tests", "scripts"] [tool.coverage.run] branch = true @@ -85,3 +87,9 @@ source = [ # In tox environments. ".tox/**/site-packages/", ] + +[tool.ruff] +ignore = [ + "E501" # Ignore 'line too long' errors since we auto-format + # using black. 
+] diff --git a/scripts/results_plot_simulation.py b/scripts/results_plot_simulation.py index 9b25c113..cd2dea3d 100644 --- a/scripts/results_plot_simulation.py +++ b/scripts/results_plot_simulation.py @@ -235,7 +235,7 @@ def plot_heur(title, path): # Driver Code: if __name__ == "__main__": - # fname = '/Users/yxin/NSF/aw-sdx/sdx/pce/tests/results/simulation.57036414_10.out' + # fname = '/Users/yxin/NSF/aw-sdx/sdx_pce/tests/results/simulation.57036414_10.out' # N = 2 # LastNlines(fname, N) path = "../../tests/results/" diff --git a/scripts/simulation.py b/scripts/simulation.py index 80729da5..1c1db5f1 100644 --- a/scripts/simulation.py +++ b/scripts/simulation.py @@ -2,12 +2,9 @@ import numpy as np -from sdx.pce.heuristic.heur import TEGroupSolver, matrix_to_connection, random_graph -from sdx.pce.load_balancing.te_solver import TESolver -from sdx.pce.models import ConnectionSolution -from sdx.pce.utils.constants import Constants -from sdx.pce.utils.random_connection_generator import RandomConnectionGenerator -from sdx.pce.utils.random_topology_generator import RandomTopologyGenerator +from sdx_pce.heuristic.heur import TEGroupSolver, matrix_to_connection, random_graph +from sdx_pce.load_balancing.te_solver import TESolver +from sdx_pce.utils.constants import Constants def dot_file(g_file, tm_file): diff --git a/src/sdx/pce/data/requests/test_request.json b/src/sdx/pce/data/requests/test_request.json deleted file mode 100644 index d747b127..00000000 --- a/src/sdx/pce/data/requests/test_request.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "test-connection-request", - "name": "Test connection request", - "start_time": "2000-01-23T04:56:07.000Z", - "end_time": "2000-01-23T04:56:07.000Z", - "bandwidth_required": 10, - "latency_required": 300, - "egress_port": - { - "id": "urn:sdx:port:amlight.net:A1:1", - "name": "Novi100:1", - "node": "urn:sdx:node:amlight.net:A1", - "status": "up" - }, - "ingress_port": - { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:2", - "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", - "status": "up" - } -} diff --git a/src/sdx/pce/data/topologies/amlight.json b/src/sdx/pce/data/topologies/amlight.json deleted file mode 100644 index 1d87ef73..00000000 --- a/src/sdx/pce/data/topologies/amlight.json +++ /dev/null @@ -1,321 +0,0 @@ -{ - "id": "urn:ogf:network:sdx:topology:amlight.net", - "name": "AmLight-OXP", - "model_version":"1.0.0", - "time_stamp": "2000-01-23T04:56:07+00:00", - "version": 1, - "links": [ - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:amlight:B1-B2", - "latency": 5, - "name": "amlight:B1-B2", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:sdx:port:amlight.net:B1:2", - "name": "Novi01:2", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:2", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - }, - { - "id": "urn:sdx:port:amlight.net:B2:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:2", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:2", - "status": "up" - } - ], - "short_name": "Miami-BocaRaton", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:amlight:A1-B1", - "latency": 5, - "name": "amlight:A1-B1", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:sdx:port:amlight.net:A1:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:1", - "node": "urn:sdx:node:amlight.net:A1", - 
"short_name": "A1:1", - "status": "up" - }, - { - "id": "urn:sdx:port:amlight.net:B1:3", - "name": "Novi01:3", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:3", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "redclara-miami", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:amlight:A1-B2", - "latency": 5, - "name": "amlight:A1-B2", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:sdx:port:amlight.net:A1:2", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:2", - "node": "urn:sdx:node:amlight.net:A1", - "short_name": "A1:2", - "status": "up" - }, - { - "id": "urn:sdx:port:amlight.net:B2:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:3", - "status": "up" - } - ], - "short_name": "redclara-BocaRaton", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:nni:Miami-Sanpaolo", - "latency": 10, - "name": "nni:Miami-Sanpaolo", - "packet_loss": 59.621339166831824, - "nni": "True", - "ports": [ - { - "id": "urn:sdx:port:amlight:B1:1", - "name": "Novi01:1", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:1", - "name": "Novi01:1", - "node": "urn:ogf:network:sdx:port:sax:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "Miami-Sanpaolo", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:nni:BocaRaton-Fortaleza", - "latency": 10, - "name": "nni:BocaRaton-Fortaleza", - "packet_loss": 59.621339166831824, - "nni": "True", - "ports": [ - { - "id": "urn:sdx:port:amlight.net:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:1", - "status": "up" - } - ], - "short_name": "BocaRaton-Fortaleza", - "bandwidth": 100000 - } - ], - "nodes": [ - { - "id": "urn:sdx:node:amlight.net:B1", - "location": { - "address": "Miami", - "latitude": 25.75633040531146, - "longitude": -80.37676058477908, - "ISO3166-2-lvl4": "US-MIA" - }, - "name": "amlight:Novi01", - "ports": [ - { - "id": "urn:sdx:port:amlight:B1:1", - "name": "Novi01:1", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:sdx:port:amlight.net:B1:2", - "name": "Novi01:2", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:2", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:sdx:port:amlight.net:B1:3", - "name": "Novi01:3", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:3", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - } - ], - "short_name": "B1" - }, - { - "id": "urn:sdx:node:amlight.net:B2", - "location": { - "address": "BocaRaton", - "latitude": 26.381437356374075, - "longitude": -80.10225977485742, - "ISO3166-2-lvl4": 
"US-BC" - }, - "name": "amlight:Novi02", - "ports": [ - { - "id": "urn:sdx:port:amlight.net:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:sdx:port:amlight.net:B2:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:2", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:2", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:sdx:port:amlight.net:B2:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:3", - "status": "up", - "state": "enabled" - } - ], - "short_name": "B2" - }, - { - "id": "urn:sdx:node:amlight.net:A1", - "location": { - "address": "redclara", - "latitude": 30.34943181039702, - "longitude": -81.66666016473143, - "ISO3166-2-lvl4": "US-RC" - }, - "name": "amlight:Novi100", - "ports": [ - { - "id": "urn:sdx:port:amlight.net:A1:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:1", - "node": "urn:sdx:node:amlight.net:A1", - "short_name": "A1:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:sdx:port:amlight.net:A1:2", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:2", - "node": "urn:sdx:node:amlight.net:A1", - "short_name": "A1:2", - "status": "up", - "state": "enabled" - } - ], - "short_name": "A1" - } - ], - "domain_service": { - "owner":"FIU" - } - } \ No newline at end of file diff --git a/src/sdx/pce/data/topologies/sax.json b/src/sdx/pce/data/topologies/sax.json deleted file mode 100644 index a4f6a7b2..00000000 --- a/src/sdx/pce/data/topologies/sax.json +++ /dev/null @@ -1,498 +0,0 @@ -{ - "id": "urn:ogf:network:sdx:topology:sax.net", - "name": "SAX-OXP", - "time_stamp": "2000-01-23T04:56:07+00:00", - "version": 1, - "model_version":"1.0.0", - "links": [ - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:sax:B1-B2", - "latency": 5, - "name": "sax:B1-B2", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B1:2", - "name": "Novi01:2", - "node": "urn:ogf:network:sdx:node:sax:B1", - "short_name": "B1:2", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:2", - "status": "up" - } - ], - "short_name": "SaoPaulo-Fortaleza", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:sax:Panama-Fortaleza", - "latency": 5, - "name": "sax:Panama-Fortaleza", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B3:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:2", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:4", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:4", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:3", - "status": "up" - } - ], - "short_name": "Panama-Fortaleza", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:sax:SanPaolo-Fortaleza", - "latency": 5, - "name": "nni:SanPaolo-Fortaleza", - "packet_loss": 59.621339166831824, - 
"ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B3:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi03:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:3", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:4", - "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:sax:B1", - "short_name": "B1:4", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "BocaRaton-Fortaleza", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:sax:A1-B1", - "latency": 5, - "name": "sax:A1-B1", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:A1:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:sax:A1", - "short_name": "A1:1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:3", - "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:sax:B1", - "short_name": "B1:3", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "redclara-SaoPaulo", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:sax:A1-B2", - "latency": 5, - "name": "sax:A1-B2", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:A1:2", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:sax:A1", - "short_name": "A1:2", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:3", - "status": "up" - } - ], - "short_name": "redclara-Fortaleza", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:nni:Miami-Sanpaolo", - "nni": "True", - "latency": 10, - "name": "nni:Miami-Sanpaolo", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:sdx:port:amlight:B1:1", - "name": "Novi01:1", - "node": "urn:sdx:node:amlight.net:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:1", - "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:sax:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "Miami-Sanpaolo", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:nni:BocaRaton-Fortaleza", - "latency": 10, - "name": "nni:BocaRaton-Fortaleza", - "packet_loss": 59.621339166831824, - "nni": "True", - "ports": [ - { - "id": "urn:sdx:port:amlight.net:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:sdx:node:amlight.net:B2", - "short_name": "B2:1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:1", - "status": "up" - } - ], - "short_name": "BocaRaton-Fortaleza", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:nni:Fortaleza-Sangano", - "latency": 5, - "name": "nni:Fortaleza-Sangano", - "packet_loss": 59.621339166831824, 
- "nni": "True", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B3:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:1", - "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "Fortaleza-Sangano", - "bandwidth": 100000 - } - ], - "nodes": [ - { - "id": "urn:ogf:network:sdx:node:sax:B1", - "location": { - "address": "SaoPaulo", - "latitude": -23.5311561958366, - "longitude": -46.650271781410524, - "ISO3166-2-lvl4": "BR-SP" - }, - "name": "sax:Novi01", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B1:1", - "name": "Novi01:1", - "node": "urn:ogf:network:sdx:port:sax:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:2", - "name": "Novi01:2", - "node": "urn:ogf:network:sdx:port:sax:B1", - "short_name": "B1:2", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:3", - "name": "Novi01:3", - "node": "urn:ogf:network:sdx:port:sax:B1", - "short_name": "B1:3", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B1:4", - "name": "Novi01:3", - "node": "urn:ogf:network:sdx:port:sax:B1", - "short_name": "B1:4", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - } - ], - "short_name": "B1" - }, - { - "id": "urn:ogf:network:sdx:node:sax:B2", - "location": { - "address": "PanamaCity", - "latitude": 8.993040465928525, - "longitude": -79.4947050137491, - "ISO3166-2-lvl4": "US-PN" - }, - "name": "sax:Novi02", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:2", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:3", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B2:4", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:4", - "node": "urn:ogf:network:sdx:node:sax:B2", - "short_name": "B2:3", - "status": "up", - "state": "enabled" - } - ], - "short_name": "B2" - }, - { - "id": "urn:ogf:network:sdx:node:sax:B3", - - "location": { - "address": "Fortaleza", - "latitude": -3.73163824920348, - "longitude": -38.52443289673026, - "ISO3166-2-lvl4": "BR-FR" - }, - "name": "sax:Novi03", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B3:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B3:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:2", - "status": 
"up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:B3:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi03:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:3", - "status": "up", - "state": "enabled" - } - ], - "short_name": "B3" - }, - { - "id": "urn:ogf:network:sdx:node:sax:A1", - "location": { - "address": "Santiago", - "latitude": -33.4507049233331, - "longitude": -70.64634765264213, - "ISO3166-2-lvl4": "CL-SN" - }, - "name": "sax:Novi100", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:A1:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:sax:A1", - "short_name": "A1:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:sax:A1:2", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:sax:A1", - "short_name": "A1:2", - "status": "up", - "state": "enabled" - } - ], - "short_name": "A1" - } - ], - "domain_service": { - "owner":"RNP" - } -} diff --git a/src/sdx/pce/data/topologies/zaoxi.json b/src/sdx/pce/data/topologies/zaoxi.json deleted file mode 100644 index 2488d380..00000000 --- a/src/sdx/pce/data/topologies/zaoxi.json +++ /dev/null @@ -1,278 +0,0 @@ -{ - "id": "urn:ogf:network:sdx:topology:zaoxi.net", - "name": "ZAOXI-OXP", - "time_stamp": "2000-01-23T04:56:07+00:00", - "version": 1, - "model_version":"1.0.0", - "links": [ - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:zaoxi:B1-B2", - "latency": 5, - "name": "zaoxi:B1-B2", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:2", - "name": "Novi01:2", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", - "short_name": "B2:2", - "status": "up" - } - ], - "short_name": "Sangano-Capetown", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:zaoxi:A1-B1", - "latency": 5, - "name": "zaoxi:A1-B1", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", - "short_name": "A1:1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:3", - "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "short_name": "B1:3", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "Karoo-Sangano", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - "residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:zaoxi:A1-B2", - "latency": 5, - "name": "zaoxi:A1-B2", - "packet_loss": 59.621339166831824, - "ports": [ - { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:2", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", - "short_name": "A1:2", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", - "short_name": "B2:3", - "status": "up" - } - ], - "short_name": "Karoo-Capetown", - "bandwidth": 100000 - }, - { - "availability": 56.37376656633328, - 
"residual_bandwidth": 100000, - "id": "urn:ogf:network:sdx:link:nni:Fortaleza-Sangano", - "latency": 10, - "name": "nni:Fortaleza-Sangano", - "packet_loss": 59.621339166831824, - "nni": "True", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:sax:B3:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", - "short_name": "B3:1", - "status": "up" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:1", - "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up" - } - ], - "short_name": "Fortaleza-Sangano", - "bandwidth": 100000 - } - ], - "nodes": [ - { - "id": "urn:ogf:network:sdx:node:zaoxi:B1", - "location": { - "address": "Sangano", - "latitude": -9.533459658700743, - "longitude": 13.216709879405311, - "ISO3166-2-lvl4": "IT-SN" - }, - "name": "zaoxi:Novi01", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:1", - "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "short_name": "B1:1", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:2", - "name": "Novi01:2", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "short_name": "B1:2", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:3", - "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", - "short_name": "B1:3", - "label_range": [ - "100-200", - "10001" - ], - "status": "up", - "state": "enabled" - } - ], - "short_name": "B1" - }, - { - "id": "urn:ogf:network:sdx:node:zaoxi:B2", - "location": { - "address": "CapeTown", - "latitude": -3.73163824920348, - "longitude": -38.52443289673026, - "ISO3166-2-lvl4": "ZA-WC" - }, - "name": "zaoxi:Novi02", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", - "short_name": "B2:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:2", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", - "short_name": "B2:2", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:3", - "label_range": [ - "100-200", - "10001" - ], - "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", - "short_name": "B2:3", - "status": "up", - "state": "enabled" - } - ], - "short_name": "B2" - }, - { - "id": "urn:ogf:network:sdx:node:zaoxi:A1", - "location": { - "address": "Karoo", - "latitude": -32.3632301851245, - "longitude": 22.541224555821298, - "ISO3166-2-lvl4": "ZA-KR" - }, - "name": "zaoxi:Novi100", - "ports": [ - { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:1", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", - "short_name": "A1:1", - "status": "up", - "state": "enabled" - }, - { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:2", - "label_range": [ - "100-200", - "1000" - ], - "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", - "short_name": "A1:2", - "status": "up", - "state": "enabled" - } - ], - "short_name": "A1" - } - ] -} diff --git a/src/sdx/pce/topology/temanager.py b/src/sdx/pce/topology/temanager.py deleted file mode 100644 index 41803ff1..00000000 --- a/src/sdx/pce/topology/temanager.py +++ 
/dev/null @@ -1,682 +0,0 @@ -import threading -from itertools import chain -from typing import List, Optional - -import networkx as nx -from networkx.algorithms import approximation as approx - -from sdx.datamodel.parsing.connectionhandler import ConnectionHandler -from sdx.pce.models import ( - ConnectionPath, - ConnectionRequest, - ConnectionSolution, - TrafficMatrix, - VlanTag, - VlanTaggedBreakdown, - VlanTaggedBreakdowns, - VlanTaggedPort, -) -from sdx.pce.topology.manager import TopologyManager - - -class TEManager: - - """ - TE Manager for connection - topology operations. - - Functions of this class are: - - - generate inputs to the PCE solver - - - converter the solver output. - - - VLAN reservation and unreservation. - """ - - def __init__(self, topology_data, connection_data): - super().__init__() - - self.topology_manager = TopologyManager() - self.connection_handler = ConnectionHandler() - - # A lock to safely perform topology operations. - self._topology_lock = threading.Lock() - - # A {domain, {port, {vlan, in_use}}} mapping. - self._vlan_tags_table = {} - - # Making topology_data optional while investigating - # https://github.com/atlanticwave-sdx/sdx-controller/issues/145. - # - # TODO: a nicer thing to do would be to keep less state around. - # https://github.com/atlanticwave-sdx/pce/issues/122 - if topology_data: - self.topology_manager.add_topology(topology_data) - self.graph = self.generate_graph_te() - self._update_vlan_tags_table( - domain_name=topology_data.get("id"), - port_list=self.topology_manager.port_list, - ) - else: - self.graph = None - - print(f"TEManager: connection_data: {connection_data}") - - self.connection = self.connection_handler.import_connection_data( - connection_data - ) - - print(f"TEManager: self.connection: {self.connection}") - - def add_topology(self, topology_data: dict): - """ - Add a new topology to TEManager. - - :param topology_data: a dictionary that represents a topology. - """ - self.topology_manager.add_topology(topology_data) - - # Ports appear in two places in the combined topology - # maintained by TopologyManager: attached to each of the - # nodes, and attached to links. Here we are using the ports - # attached to links. - self._update_vlan_tags_table( - domain_name=topology_data.get("id"), - port_list=self.topology_manager.port_list, - ) - - def update_topology(self, topology_data: dict): - """ - Update an existing topology in TEManager. - - :param topology_data: a dictionary that represents a topology. - """ - self.topology_manager.update_topology(topology_data) - - # TODO: careful here when updating VLAN tags table -- what do - # we do when an in use VLAN tag becomes invalid in the update? - # See https://github.com/atlanticwave-sdx/pce/issues/123 - # - # self._update_vlan_tags_table_from_links( - # domain_name=topology_data.get("id"), - # port_list=self.topology_manager.port_list, - # ) - - def _update_vlan_tags_table(self, domain_name, port_list): - """ - Update VLAN tags table. - """ - self._vlan_tags_table[domain_name] = {} - - for port_id, link in port_list.items(): - # TODO: port here seems to be a dict, not sdx.datamodel.models.Port - for port in link.ports: - # TODO: sometimes port_id and "inner" port_id below - # can be different. Why? For example, port_id of - # urn:sdx:port:amlight.net:B1:2 and port_id_inner of - # urn:sdx:port:amlight.net:B2:2. 
- # - # See https://github.com/atlanticwave-sdx/pce/issues/124 - # - # port_id_inner = port.get("id") - # print(f"port_id: {port_id}, port_id_inner: {port_id_inner}") - # assert port_id == port_id_inner - - label_range = port.get("label_range") - - # TODO: why is label_range sometimes None, and what to - # do when that happens? - if label_range is None: - continue - - # assert label_range is not None, "label_range is None" - - # label_range is of the form ['100-200', '1000']; let - # us expand it. Would have been ideal if this was - # already in some parsed form, but it is not, so this - # is a work-around. - all_labels = self._expand_label_range(label_range) - - # Make a map lik: `{tag1: True, tag2: True, tag3: True...}` - labels_available = {label: True for label in all_labels} - - self._vlan_tags_table[domain_name][port_id] = labels_available - - def _expand_label_range(self, label_range: List[str]) -> List[int]: - """ - Expand the label range to a list of numbers. - """ - labels = [self._expand_label(label) for label in label_range] - # flatten result and return it. - return list(chain.from_iterable(labels)) - - def _expand_label(self, label: str) -> List[int]: - """ - Expand items in label range to a list of numbers. - - Items in label ranges can be of the form "100-200" or "100". - For the first case, we return [100,101,...200]; for the second - case, we return [100]. - """ - if not isinstance(label, str): - raise ValueError("Label must be a string.") - - parts = label.split("-") - start = int(parts[0]) - stop = int(parts[-1]) + 1 - - return list(range(start, stop)) - - def generate_connection_te(self) -> TrafficMatrix: - """ - Generate a Traffic Matrix from the connection request we have. - """ - ingress_port = self.connection.ingress_port - egress_port = self.connection.egress_port - - print( - f"generate_connection_te(), ports: " - f"ingress_port.id: {ingress_port.id}, " - f"egress_port.id: {egress_port.id}" - ) - - ingress_node = self.topology_manager.topology.get_node_by_port(ingress_port.id) - egress_node = self.topology_manager.topology.get_node_by_port(egress_port.id) - - if ingress_node is None: - print(f"No ingress node was found for ingress port ID '{ingress_port.id}'") - return None - - if egress_node is None: - print(f"No egress node is found for egress port ID '{egress_port.id}'") - return None - - ingress_nodes = [ - x for x, y in self.graph.nodes(data=True) if y["id"] == ingress_node.id - ] - - egress_nodes = [ - x for x, y in self.graph.nodes(data=True) if y["id"] == egress_node.id - ] - - if len(ingress_nodes) <= 0: - print(f"No ingress node '{ingress_node.id}' found in the graph") - return None - - if len(egress_nodes) <= 0: - print(f"No egress node '{egress_node.id}' found in the graph") - return None - - required_bandwidth = self.connection.bandwidth or 0 - required_latency = self.connection.latency or 0 - - print( - f"Setting required_latency: {required_latency}, " - f"required_bandwidth: {required_bandwidth}" - ) - - request = ConnectionRequest( - source=ingress_nodes[0], - destination=egress_nodes[0], - required_bandwidth=required_bandwidth, - required_latency=required_latency, - ) - - return TrafficMatrix(connection_requests=[request]) - - def generate_graph_te(self) -> nx.Graph: - """ - Return the topology graph that we have. - """ - graph = self.topology_manager.generate_graph() - graph = nx.convert_node_labels_to_integers(graph, label_attribute="id") - - # TODO: why is this needed? 
- self.graph = graph - # print(list(graph.nodes(data=True))) - - return graph - - def graph_node_connectivity(self, source=None, dest=None): - """ - Check that a source and destination node have connectivity. - """ - # TODO: is this method really needed? - return approx.node_connectivity(self.graph, source, dest) - - def requests_connectivity(self, tm: TrafficMatrix) -> bool: - """ - Check that connectivity is possible. - """ - # TODO: consider using filter() and reduce(), maybe? - # TODO: write some tests for this method. - for request in tm.connection_requests: - conn = self.graph_node_connectivity(request.source, request.destination) - print( - f"Request connectivity: source {request.source}, destination: {request.destination} = {conn}" - ) - if conn is False: - return False - - return True - - def generate_connection_breakdown(self, connection) -> dict: - """ - A "router" method for backward compatibility. - """ - if isinstance(connection, ConnectionSolution): - return self._generate_connection_breakdown_tm(connection) - return self._generate_connection_breakdown_old(connection) - - def _generate_connection_breakdown_tm(self, connection: ConnectionSolution) -> dict: - """ - Take a connection and generate a breakdown. - - This is an alternative to generate_connection_breakdown() - below which uses the newly defined types from sdx.pce.models. - """ - if connection is None or connection.connection_map is None: - print(f"Can't find a breakdown for {connection}") - return None - - breakdown = {} - paths = connection.connection_map # p2p for now - - # i_port = None - # e_port = None - - for domain, links in paths.items(): - print(f"domain: {domain}, links: {links}") - - current_link_set = [] - - for count, link in enumerate(links): - print(f"count: {count}, link: {link}") - - assert isinstance(link, ConnectionPath) - - src_node = self.graph.nodes.get(link.source) - assert src_node is not None - - dst_node = self.graph.nodes.get(link.destination) - assert dst_node is not None - - print(f"source node: {src_node}, destination node: {dst_node}") - - src_domain = self.topology_manager.get_domain_name(src_node.get("id")) - dst_domain = self.topology_manager.get_domain_name(dst_node.get("id")) - - # TODO: what do we do when a domain can't be - # determined? Can a domain be `None`? 
- print(f"source domain: {src_domain}, destination domain: {dst_domain}") - - current_link_set.append(link) - current_domain = src_domain - if src_domain == dst_domain: - # current_domain = domain_1 - if count == len(links) - 1: - breakdown[current_domain] = current_link_set.copy() - else: - breakdown[current_domain] = current_link_set.copy() - current_domain = None - current_link_set = [] - - print(f"[intermediate] breakdown: {breakdown}") - - # now starting with the ingress_port - first = True - i = 0 - domain_breakdown = {} - - for domain, links in breakdown.items(): - print(f"Creating domain_breakdown: domain: {domain}, links: {links}") - segment = {} - if first: - first = False - last_link = links[-1] - n1 = self.graph.nodes[last_link.source]["id"] - n2 = self.graph.nodes[last_link.destination]["id"] - n1, p1, n2, p2 = self.topology_manager.topology.get_port_by_link(n1, n2) - i_port = self.connection.ingress_port.to_dict() - e_port = p1 - next_i = p2 - elif i == len(breakdown) - 1: - i_port = next_i - e_port = self.connection.egress_port.to_dict() - else: - last_link = links[-1] - n1 = self.graph.nodes[last_link.source]["id"] - n2 = self.graph.nodes[last_link.destination]["id"] - n1, p1, n2, p2 = self.topology_manager.topology.get_port_by_link(n1, n2) - i_port = next_i - e_port = p1 - next_i = p2 - segment["ingress_port"] = i_port - segment["egress_port"] = e_port - domain_breakdown[domain] = segment.copy() - i = i + 1 - - print(f"generate_connection_breakdown(): domain_breakdown: {domain_breakdown}") - - tagged_breakdown = self._reserve_vlan_breakdown(domain_breakdown) - print(f"generate_connection_breakdown(): tagged_breakdown: {tagged_breakdown}") - - # Make tests pass, temporarily. - if tagged_breakdown is None: - return None - - assert isinstance(tagged_breakdown, VlanTaggedBreakdowns) - - # Return a dict containing VLAN-tagged breakdown in the - # expected format. - return tagged_breakdown.to_dict().get("breakdowns") - - def _generate_connection_breakdown_old(self, connection): - """ - Take a connection and generate a breakdown. - - TODO: remove this when convenient. - https://github.com/atlanticwave-sdx/pce/issues/125 - """ - assert connection is not None - - breakdown = {} - paths = connection[0] # p2p for now - # cost = connection[1] - i_port = None - e_port = None - - print(f"Domain breakdown with graph: {self.graph}") - print(f"Graph nodes: {self.graph.nodes}") - print(f"Graph edges: {self.graph.edges}") - - print(f"Paths: {paths}") - - for i, j in paths.items(): - print(f"i: {i}, j: {j}") - current_link_set = [] - for count, link in enumerate(j): - print(f"count: {count}, link: {link}") - assert len(link) == 2 - - node_1 = self.graph.nodes.get(link[0]) - assert node_1 is not None - - node_2 = self.graph.nodes.get(link[1]) - assert node_2 is not None - - print(f"node_1: {node_1}, node_2: {node_2}") - - domain_1 = self.topology_manager.get_domain_name(node_1["id"]) - domain_2 = self.topology_manager.get_domain_name(node_2["id"]) - - # # TODO: handle the cases where a domain was not found. 
- # if domain_1 is None: - # domain_1 = f"domain_{i}" - # if domain_2 is None: - # domain_2 = f"domain_{i}" - - print(f"domain_1: {domain_1}, domain_2: {domain_2}") - - current_link_set.append(link) - current_domain = domain_1 - if domain_1 == domain_2: - # current_domain = domain_1 - if count == len(j) - 1: - breakdown[current_domain] = current_link_set.copy() - else: - breakdown[current_domain] = current_link_set.copy() - current_domain = None - current_link_set = [] - - print(f"[intermediate] breakdown: {breakdown}") - - # now starting with the ingress_port - first = True - i = 0 - domain_breakdown = {} - - for domain, links in breakdown.items(): - print(f"Creating domain_breakdown: domain: {domain}, links: {links}") - segment = {} - if first: - first = False - last_link = links[-1] - n1 = self.graph.nodes[last_link[0]]["id"] - n2 = self.graph.nodes[last_link[1]]["id"] - n1, p1, n2, p2 = self.topology_manager.topology.get_port_by_link(n1, n2) - i_port = self.connection.ingress_port.to_dict() - e_port = p1 - next_i = p2 - elif i == len(breakdown) - 1: - i_port = next_i - e_port = self.connection.egress_port.to_dict() - else: - last_link = links[-1] - n1 = self.graph.nodes[last_link[0]]["id"] - n2 = self.graph.nodes[last_link[1]]["id"] - n1, p1, n2, p2 = self.topology_manager.topology.get_port_by_link(n1, n2) - i_port = next_i - e_port = p1 - next_i = p2 - segment["ingress_port"] = i_port - segment["egress_port"] = e_port - domain_breakdown[domain] = segment.copy() - i = i + 1 - - print(f"generate_connection_breakdown(): domain_breakdown: {domain_breakdown}") - return domain_breakdown - - """ - functions for vlan reservation. - - Operations are: - - - obtain the available vlan lists - - - find the vlan continuity on a path if possible. - - - find the vlan translation on the multi-domain path if - continuity not possible - - - reserve the vlan on all the ports on the path - - - unreserve the vlan when the path is removed - """ - - def _reserve_vlan_breakdown( - self, domain_breakdown: dict - ) -> Optional[VlanTaggedBreakdowns]: - """ - Upate domain breakdown with VLAN reservation information. - - This is the top-level function, to be called after - _generate_connection_breakdown_tm(), and should be a private - implementation detail. It should be always called, meaning, - the VLAN tags should be present in the final breakdown, - regardless of whether the connection request explicitly asked - for it or not. - - For this to work, TEManager should maintain a table of VLAN - allocation from each of the domains. The ones that are not in - use can be reserved, and the ones that are not in use anymore - should be returned to the pool by calling unreserve(). - - :param domain_breakdown: per port available vlan range is - pased in datamodel._parse_available_vlans(self, vlan_str) - - :return: Updated domain_breakdown with the VLAN assigned to - each port along a path, or None if failure. - """ - - # # Check if there exist a path of vlan continuity. This is - # # disabled for now, until the simple case is handled. 
- # selected_vlan = self.find_vlan_on_path(domain_breakdown) - # if selected_vlan is not None: - # return self._reserve_vlan_on_path(domain_breakdown, selected_vlan) - - # if not, assuming vlan translation on the domain border port - - print(f"reserve_vlan_breakdown: domain_breakdown: {domain_breakdown}") - - breakdowns = {} - - # upstream_o_vlan = "" - for domain, segment in domain_breakdown.items(): - ingress_port = segment.get("ingress_port") - egress_port = segment.get("egress_port") - - print( - f"VLAN reservation: domain: {domain}, " - f"ingress_port: {ingress_port}, egress_port: {egress_port}" - ) - - if ingress_port is None or egress_port is None: - return None - - ingress_vlan = self._reserve_vlan(domain, ingress_port) - egress_vlan = self._reserve_vlan(domain, egress_port) - - ingress_port_id = ingress_port.get("id") - egress_port_id = egress_port.get("id") - - print( - f"VLAN reservation: domain: {domain}, " - f"ingress_vlan: {ingress_vlan}, egress_vlan: {egress_vlan}" - ) - - # if one has empty vlan range, first resume reserved vlans - # in the previous domain, then return false. - if egress_vlan is None: - self._unreserve_vlan(ingress_vlan) - return None - - if ingress_vlan is None: - self._unreserve_vlan(egress_vlan) - return None - - # # vlan translation from upstream_o_vlan to i_vlan - # segment["ingress_upstream_vlan"] = upstream_o_vlan - # segment["ingress_vlan"] = ingress_vlan - # segment["egress_vlan"] = egress_vlan - # upstream_o_vlan = egress_vlan - - port_a = VlanTaggedPort( - VlanTag(value=ingress_vlan, tag_type=1), port_id=ingress_port_id - ) - port_z = VlanTaggedPort( - VlanTag(value=egress_vlan, tag_type=1), port_id=egress_port_id - ) - - # Names look like "AMLIGHT_vlan_201_202_Ampath_Tenet". We - # can form the initial part, but where did the - # `Ampath_Tenet` at the end come from? - domain_name = domain.split(":")[-1].split(".")[0].upper() - name = f"{domain_name}_vlan_{ingress_vlan}_{egress_vlan}" - - breakdowns[domain] = VlanTaggedBreakdown( - name=name, - dynamic_backup_path=True, - uni_a=port_a, - uni_z=port_z, - ) - - return VlanTaggedBreakdowns(breakdowns=breakdowns) - - def _find_vlan_on_path(self, path): - """ - Find an unused available VLAN on path. - - Finds a VLAN that's not being used at the moment on a provided - path. Returns an available VLAN if possible, None if none are - available on the submitted path. - - output: vlan_tag string or None - """ - - # TODO: implement this - # https://github.com/atlanticwave-sdx/pce/issues/126 - - assert False, "Not implemented" - - def _reserve_vlan_on_path(self, domain_breakdown, selected_vlan): - # TODO: what is the difference between reserve_vlan and - # reserve_vlan_on_path? - - # TODO: implement this - # https://github.com/atlanticwave-sdx/pce/issues/126 - - # return domain_breakdown - assert False, "Not implemented" - - def _reserve_vlan(self, domain: str, port: dict, tag=None): - # with self._topology_lock: - # pass - - port_id = port.get("id") - print(f"reserve_vlan domain: {domain} port_id: {port_id}") - - if port_id is None: - return None - - # Look up available VLAN tags by domain and port ID. 
- domain_table = self._vlan_tags_table.get(domain) - - if domain_table is None: - print(f"reserve_vlan domain: {domain} entry: {domain_table}") - return None - - vlan_table = domain_table.get(port_id) - - print(f"reserve_vlan domain: {domain} vlan_table: {vlan_table}") - - # TODO: figure out when vlan_table can be None - if vlan_table is None: - print(f"Can't find a mapping for domain:{domain} port:{port_id}") - return None - - available_tag = None - - if tag is None: - # Find the first available VLAN tag from the table. - for vlan_tag, vlan_available in vlan_table.items(): - if vlan_available: - available_tag = vlan_tag - else: - if vlan_table[tag] is True: - available_tag = tag - else: - return None - - if available_tag is not None: - # mark the tag as in-use. - vlan_table[available_tag] = False - - # available_tag = 200 - return available_tag - - # to be called by delete_connection() - def _unreserve_vlan_breakdown(self, break_down): - # TODO: implement this. - # https://github.com/atlanticwave-sdx/pce/issues/127 - # with self._topology_lock: - # pass - assert False, "Not implemented" - - def _unreserve_vlan(self, domain: str, port: dict, tag=None): - """ - Mark a VLAN tag as not in use. - """ - # TODO: implement this. - # https://github.com/atlanticwave-sdx/pce/issues/127 - - # with self._topology_lock: - # pass - assert False, "Not implemented" - - def _print_vlan_tags_table(self): - import pprint - - print("------ VLAN TAGS TABLE -------") - pprint.pprint(self._vlan_tags_table) - print("------------------------------") diff --git a/src/sdx/pce/utils/__init__.py b/src/sdx/pce/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/sdx/__init__.py b/src/sdx_pce/__init__.py similarity index 100% rename from src/sdx/__init__.py rename to src/sdx_pce/__init__.py diff --git a/src/sdx/pce/heuristic/heur.py b/src/sdx_pce/heuristic/heur.py similarity index 96% rename from src/sdx/pce/heuristic/heur.py rename to src/sdx_pce/heuristic/heur.py index 27ee65e1..5207606d 100644 --- a/src/sdx/pce/heuristic/heur.py +++ b/src/sdx_pce/heuristic/heur.py @@ -1,5 +1,4 @@ import argparse -import json # importing the module from datetime import datetime @@ -7,11 +6,10 @@ import numpy as np import prtpy -from sdx.pce.load_balancing.te_solver import TESolver -from sdx.pce.models import ConnectionRequest, TrafficMatrix -from sdx.pce.utils.constants import Constants -from sdx.pce.utils.random_connection_generator import RandomConnectionGenerator -from sdx.pce.utils.random_topology_generator import RandomTopologyGenerator +from sdx_pce.load_balancing.te_solver import TESolver +from sdx_pce.models import ConnectionRequest, TrafficMatrix +from sdx_pce.utils.random_connection_generator import RandomConnectionGenerator +from sdx_pce.utils.random_topology_generator import RandomTopologyGenerator def random_graph(n, p, m): diff --git a/src/sdx/pce/__init__.py b/src/sdx_pce/load_balancing/__init__.py similarity index 100% rename from src/sdx/pce/__init__.py rename to src/sdx_pce/load_balancing/__init__.py diff --git a/src/sdx/pce/load_balancing/te_solver.py b/src/sdx_pce/load_balancing/te_solver.py similarity index 84% rename from src/sdx/pce/load_balancing/te_solver.py rename to src/sdx_pce/load_balancing/te_solver.py index 58c4c1ab..2b773152 100644 --- a/src/sdx/pce/load_balancing/te_solver.py +++ b/src/sdx_pce/load_balancing/te_solver.py @@ -7,22 +7,18 @@ """ import copy +import logging from dataclasses import dataclass from itertools import chain, cycle -from typing import List, 
Mapping, Tuple, Union +from typing import List, Tuple, Union import networkx as nx import numpy as np from ortools.linear_solver import pywraplp -from sdx.pce.models import ( - ConnectionPath, - ConnectionRequest, - ConnectionSolution, - TrafficMatrix, -) -from sdx.pce.utils.constants import Constants -from sdx.pce.utils.functions import GraphFunction +from sdx_pce.models import ConnectionPath, ConnectionSolution, TrafficMatrix +from sdx_pce.utils.constants import Constants +from sdx_pce.utils.functions import GraphFunction @dataclass @@ -69,14 +65,18 @@ def __init__( self.links = [] # list of links[src][dest], 2*numEdges + self._logger = logging.getLogger(__name__) + def solve(self) -> Tuple[Union[ConnectionSolution, None], float]: """ Return the computed path and associated cost. """ data = self._create_data_model() if data is None: - print(f"Could not create a data model") - return ConnectionSolution(connection_map=None, cost=0) + self._logger.warning("Could not create a data model") + return ConnectionSolution( + connection_map=None, cost=0, request_id=self.tm.request_id + ) # Create the mip solver with the SCIP backend. solver = pywraplp.Solver.CreateSolver("SCIP") @@ -85,9 +85,11 @@ def solve(self) -> Tuple[Union[ConnectionSolution, None], float]: for j in range(data.num_vars): x[j] = solver.IntVar(0, 1, "x[%i]" % j) - print(f"Number of variables = {solver.NumVariables()}") - print(f"num_constraints: {data.num_constraints}") - print(f"num_inequality: {data.num_inequality}") + self._logger.info( + f"Number of variables = {solver.NumVariables()}, " + f"num_constraints: {data.num_constraints}, " + f"num_inequality: {data.num_inequality}" + ) for i in range(data.num_constraints - data.num_inequality): constraint_expr = [ @@ -95,11 +97,11 @@ def solve(self) -> Tuple[Union[ConnectionSolution, None], float]: ] solver.Add(sum(constraint_expr) == data.bounds[i]) - print(len(data.bounds)) - # print(data.bounds) - print(len(data.constraint_coeffs)) - # print(data.constraint_coeffs) - print(data.num_inequality) + self._logger.info( + f"len(data.bounds): {len(data.bounds)}, " + f"len(data.constraint_coeffs)): {len(data.constraint_coeffs)}, " + f"data.num_inequality: {data.num_inequality}" + ) for i in range( data.num_constraints - data.num_inequality, data.num_constraints @@ -109,7 +111,7 @@ def solve(self) -> Tuple[Union[ConnectionSolution, None], float]: ] solver.Add(sum(constraint_expr) <= data.bounds[i]) - print(f"Number of constraints = {solver.NumConstraints()}") + self._logger.info(f"Number of constraints = {solver.NumConstraints()}") objective = solver.Objective() for j in range(data.num_vars): @@ -120,20 +122,20 @@ def solve(self) -> Tuple[Union[ConnectionSolution, None], float]: solution = [] paths = None if status == pywraplp.Solver.OPTIMAL: - print(f"Objective value = {solver.Objective().Value()}") + self._logger.info(f"Objective value = {solver.Objective().Value()}") for j in range(data.num_vars): # print(x[j].name(), ' = ', x[j].solution_value()) solution.append(x[j].solution_value()) paths = np.array(solution).reshape(len(self.tm.connection_requests), -1) - print(paths.shape) + self._logger.info(f"paths.shape={paths.shape}") # print(path) # print('Problem solved in %f milliseconds' % solver.wall_time()) # print('Problem solved in %d iterations' % solver.iterations()) # print('Problem solved in %d branch-and-bound nodes' % solver.nodes()) else: - print("The problem does not have an optimal solution.") + self._logger.warning("The problem does not have an optimal solution.") # 
returns: dict(conn request, [path]), cost return self._solution_translator(paths, solver.Objective().Value()) @@ -144,8 +146,10 @@ def _solution_translator( # extract the edge/path real_paths = [] if paths is None: - print("No solution: empty input") - return ConnectionSolution(connection_map=None, cost=cost) + self._logger.warning("No solution: empty input") + return ConnectionSolution( + connection_map=None, cost=cost, request_id=self.tm.request_id + ) for path in paths: real_path = [] i = 0 @@ -157,14 +161,14 @@ def _solution_translator( # associate with the TM requests id_connection = 0 - ordered_paths = {} - result = ConnectionSolution(connection_map={}, cost=cost) + result = ConnectionSolution( + connection_map={}, cost=cost, request_id=self.tm.request_id + ) for request in self.tm.connection_requests: src = request.source dest = request.destination - bw = request.required_bandwidth # latency = connection[3] # latency is unused # Add request as the key to solution map @@ -180,7 +184,7 @@ def _solution_translator( for edge in path: # print("edge:"+str(edge)) if edge[0] == src: - print(f"Adding edge {edge} for request {request}") + self._logger.info(f"Adding edge {edge} for request {request}") # ordered_paths.append(edge) # Make a path and add it to the solution map @@ -197,7 +201,7 @@ def _solution_translator( # print("ordered paths:"+str(ordered_paths)) # return ordered_paths - print(f"solution_translator result: {result}") + self._logger.info(f"solution_translator result: {result}") return result def update_graph(self, graph, pathsconnection): @@ -264,7 +268,7 @@ def _create_data_model(self) -> DataModel: nodenum = self.graph.number_of_nodes() linknum = self.graph.number_of_edges() - print(f"Creating data model: #nodes: {nodenum}, #links: {linknum}") + self._logger.info(f"Creating data model: #nodes: {nodenum}, #links: {linknum}") # graph flow matrix inputmatrix, links = self._flow_matrix(self.graph) @@ -286,7 +290,7 @@ def _create_data_model(self) -> DataModel: # Avoid going past array bounds. 
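+            # request.source and request.destination are integer node labels of
+            # the graph (TEManager relabels nodes with
+            # nx.convert_node_labels_to_integers), so an index beyond the node
+            # count indicates a request built against a different graph.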
if request.source > nodenum or request.destination > nodenum: - print( + self._logger.warning( f"Cannot create data model: " f"request.source ({request.source}) or " f"request.destination ({request.destination}) " @@ -298,7 +302,7 @@ def _create_data_model(self) -> DataModel: rhs[request.destination] = 1 bounds += list(rhs) - print(f"bound 1: {len(bounds)}") + self._logger.info(f"bound 1: {len(bounds)}") # rhsbw -TODO *2 edges bwlinklist = [] @@ -314,45 +318,50 @@ def _create_data_model(self) -> DataModel: # add the bwconstraint rhs bounds += bwlinklist - print(f"bound 2: {len(bounds)}") + self._logger.info(f"bound 2: {len(bounds)}") # add the latconstraint rhs if latency: bounds += latconstraint["rhs"] - print(f"bound 3: {len(bounds)}") + self._logger.info(f"bound 3: {len(bounds)}") # print(bounds) # form the constraints: lhs flowconstraints = self._lhsflow(self.tm.connection_requests, inputmatrix) bwconstraints = self._lhsbw(self.tm.connection_requests, inputmatrix) - print(f"\nConstraints Shape:{len(flowconstraints)}:{len(bwconstraints)}") + self._logger.info( + f"Constraints Shape:{len(flowconstraints)}:{len(bwconstraints)}" + ) # print("\n flow"+str(flowconstraints)) # print("\n bw:"+str(type(bwconstraints))) bw_np = np.array(bwconstraints) - print(f"np:{flowconstraints.shape} : {bw_np.shape}") + self._logger.info(f"np:{flowconstraints.shape} : {bw_np.shape}") flow_lhs = np.concatenate((flowconstraints, bw_np)) - print(f"flow_lhs: {np.shape(flow_lhs)}") + self._logger.info(f"flow_lhs: {np.shape(flow_lhs)}") if latency: - print(f"latcons: {np.shape(latconstraint['lhs'])}") + self._logger.info(f"latcons: {np.shape(latconstraint['lhs'])}") lhs = np.concatenate((flow_lhs, latconstraint["lhs"])) else: lhs = flow_lhs # objective function if self.objective == Constants.OBJECTIVE_COST: - print("Objecive: Cost") + self._logger.info("Objecive: Cost") cost = self._mc_cost(links) if self.objective == Constants.OBJECTIVE_LOAD_BALANCING: - print("Objecive: Load Balance") + self._logger.info("Objecive: Load Balance") cost = self._lb_cost(links) - print(f"cost len: {len(cost)}") + self._logger.info( + f"cost len: {len(cost)}, " + f"lhs shape: {lhs.shape}, " + f"rhs shape: {len(bounds)}" + ) + # print(cost) - print(f"lhs shape: {lhs.shape}") - print(f"rhs shape: {len(bounds)}") coeffs = [] for i in range(lhs.shape[0]): @@ -409,7 +418,7 @@ def _lhsflow(self, request_list, inputmatrix): """ r = len(request_list) m, n = inputmatrix.shape - print(f"r={r}:m={m}:n={n}") + self._logger.info(f"_lhsflow: r={r}:m={m}:n={n}") # out = np.zeros((r,m,r,n), dtype=inputmatrix.dtype) # diag = np.einsum('ijik->ijk',out) # diag[:] = inputmatrix @@ -419,7 +428,7 @@ def _lhsflow(self, request_list, inputmatrix): out = np.zeros((r * m, r * n), dtype=inputmatrix.dtype) for i in range(r): out[i * m : (i + 1) * m, i * n : (i + 1) * n] = inputmatrix - print(f"out: {out.shape}") + self._logger.info(f"lhsflow: out: {out.shape}") return out def _lhsbw(self, request_list, inputmatrix): @@ -463,8 +472,8 @@ def _make_latency_constaints(self, links): rhs = [] request_list = self.tm.connection_requests - print(f"request: {len(request_list)}") - print(f"links: {len(links)}") + + self._logger.info(f"request: {len(request_list)}, links: {len(links)}") zerolist = np.zeros(len(links), dtype=int) latency_list = [] diff --git a/src/sdx/pce/models.py b/src/sdx_pce/models.py similarity index 98% rename from src/sdx/pce/models.py rename to src/sdx_pce/models.py index 23b7f5fb..6a9440cc 100644 --- a/src/sdx/pce/models.py +++ 
b/src/sdx_pce/models.py @@ -35,6 +35,7 @@ class TrafficMatrix: """ connection_requests: List[ConnectionRequest] + request_id: str @dataclass_json @@ -59,6 +60,7 @@ class ConnectionSolution: connection_map: Mapping[ConnectionRequest, List[ConnectionPath]] cost: float + request_id: str # The classess below should help us construct a breakdown of the below diff --git a/src/sdx/pce/load_balancing/__init__.py b/src/sdx_pce/topology/__init__.py similarity index 100% rename from src/sdx/pce/load_balancing/__init__.py rename to src/sdx_pce/topology/__init__.py diff --git a/src/sdx/pce/topology/grenmlconverter.py b/src/sdx_pce/topology/grenmlconverter.py similarity index 74% rename from src/sdx/pce/topology/grenmlconverter.py rename to src/sdx_pce/topology/grenmlconverter.py index 2aadd660..5b5a8511 100644 --- a/src/sdx/pce/topology/grenmlconverter.py +++ b/src/sdx_pce/topology/grenmlconverter.py @@ -1,7 +1,10 @@ +import logging + from grenml import GRENMLManager from grenml.models.nodes import Node +from sdx_datamodel.models.topology import Topology -from sdx.datamodel.models.topology import Topology +from sdx_pce.utils.constants import Constants class GrenmlConverter(object): @@ -13,30 +16,31 @@ def set_topology(self, topology: Topology): self.topology = topology def read_topology(self): - domain_service = self.topology.get_domain_service() - owner = domain_service.owner + domain_service = self.topology.services + if domain_service is not None: + owner = domain_service.owner + else: + owner = Constants.DEFAULT_OWNER self.grenml_manager.set_primary_owner(owner) - self.grenml_manager.add_institution(owner, owner) - self.add_nodes(self.topology.get_nodes()) + self.add_nodes(self.topology.nodes) - self.add_links(self.topology.get_links()) + self.add_links(self.topology.links) self.topology_str = self.grenml_manager.write_to_string() - # print(self.topology_str) - def add_nodes(self, nodes): for node in nodes: - location = node.get_location() - print(f"adding node: {node.id}") + location = node.location + logging.info(f"adding node: {node.id}") self.grenml_manager.add_node( node.id, node.name, node.short_name, longitude=location.longitude, latitude=location.latitude, + iso3166_2_lvl4=location.iso3166_2_lvl4, address=location.address, ) @@ -48,18 +52,19 @@ def add_links(self, links): for port in ports: node = self.topology.get_node_by_port(port["id"]) if node is not None: - location = node.get_location() + location = node.location grenml_node = Node( node.id, node.name, node.short_name, longitude=location.longitude, latitude=location.latitude, + iso3166_2_lvl4=location.iso3166_2_lvl4, address=location.address, ) end_nodes.append(grenml_node) else: - print( + logging.warning( f"This port ({port['id']}) doesn't belong to any " f"node in the topology, likely an Interdomain port?" 
) diff --git a/src/sdx/pce/topology/manager.py b/src/sdx_pce/topology/manager.py similarity index 51% rename from src/sdx/pce/topology/manager.py rename to src/sdx_pce/topology/manager.py index 3230531f..e8c8da2e 100644 --- a/src/sdx/pce/topology/manager.py +++ b/src/sdx_pce/topology/manager.py @@ -1,19 +1,19 @@ import copy import datetime +import logging +from typing import Mapping import networkx as nx - -from sdx.datamodel.models.topology import ( +from sdx_datamodel.models.topology import ( TOPOLOGY_INITIAL_VERSION, SDX_TOPOLOGY_ID_prefix, ) -from sdx.datamodel.parsing.topologyhandler import TopologyHandler +from sdx_datamodel.parsing.topologyhandler import TopologyHandler from .grenmlconverter import GrenmlConverter class TopologyManager: - """ Manager for topology operations. @@ -25,65 +25,102 @@ class TopologyManager: """ def __init__(self): - super().__init__() + # The merged "super" topology of topologies of different + # domains, with inter-domain links between them computed. + self._topology = None + + # Mapping from topology ID to topology. + self._topology_map = {} + + # Mapping from port ID to link. + self._port_map = {} - self.topology_handler = TopologyHandler() + # Mapping from port ID to node. + self._port_node_map = {} - self.topology = None - self.topology_list = {} - self.port_list = {} # {port, link} + # Number of interdomain links we computed. + self._num_interdomain_link = 0 - self.num_interdomain_link = 0 + self._logger = logging.getLogger(__name__) def get_handler(self): return self.topology_handler def topology_id(self, id): - self.topology._id(id) + self._topology._id(id) def set_topology(self, topology): - self.topology = topology + self._topology = topology def get_topology(self): - return self.topology + return self._topology + + def get_topology_map(self) -> dict: + return self._topology_map + + def get_port_map(self) -> Mapping[str, dict]: + """ + Return a mapping between port IDs and links. + """ + return self._port_map + + def get_port_node_map(self) -> Mapping[str, dict]: + """ + Return a mapping between port IDs and nodes. + """ + return self._port_node_map def clear_topology(self): - self.topology = None - self.topology_list = {} - self.port_list = {} + self._topology = None + self._topology_map = {} + self._port_map = {} def add_topology(self, data): - topology = self.topology_handler.import_topology_data(data) - self.topology_list[topology.id] = topology + topology = TopologyHandler().import_topology_data(data) + self._topology_map[topology.id] = topology - if self.topology is None: - self.topology = copy.deepcopy(topology) + if self._topology is None: + self._topology = copy.deepcopy(topology) # Generate a new topology id self.generate_id() # Addding to the port list - links = topology.get_links() - for link in links: - for port in link.ports: - self.port_list[port["id"]] = link + # links = topology.links + # for link in links: + # for port in link.ports: + # self._port_map[port["id"]] = link else: # check the inter-domain links first. - self.num_interdomain_link += self.inter_domain_check(topology) - if self.num_interdomain_link == 0: - print(f"Warning: no interdomain links detected in {topology.id}!") + self._num_interdomain_link += self.inter_domain_check(topology) + if self._num_interdomain_link == 0: + self._logger.debug( + f"Warning: no interdomain links detected in {topology.id}!" 
+ ) # Nodes - nodes = topology.get_nodes() - self.topology.add_nodes(nodes) + nodes = topology.nodes + self._topology.add_nodes(nodes) # links - links = topology.get_links() - self.topology.add_links(links) + links = topology.links + self._topology.add_links(links) # version self.update_version(False) + # Addding to the port list + links = topology.links + for link in links: + for port in link.ports: + self._port_map[port["id"]] = link + + # Addding to the port node + nodes = topology.nodes + for node in nodes: + for port in node.ports: + self._port_node_map[port.id] = node + self.update_timestamp() def get_domain_name(self, node_id): @@ -91,14 +128,14 @@ def get_domain_name(self, node_id): Find the topology ID associated with the given node ID. A topology ID is expected to be of the format - "urn:ogf:network:sdx:topology:amlight.net", and from this, we + "urn:sdx:topology:amlight.net", and from this, we can find the domain name associated with the topology. TODO: This function name may be a misnomer? """ domain_id = None - # print(f"len of topology_list: {len(self.topology_list)}") - for topology_id, topology in self.topology_list.items(): + # print(f"len of topology_list: {len(self._topology_map)}") + for topology_id, topology in self._topology_map.items(): if topology.has_node_by_id(node_id): domain_id = topology_id break @@ -106,61 +143,70 @@ def get_domain_name(self, node_id): return domain_id def generate_id(self): - self.topology.set_id(SDX_TOPOLOGY_ID_prefix) - self.topology.version = TOPOLOGY_INITIAL_VERSION + self._topology.id = SDX_TOPOLOGY_ID_prefix + self._topology.version = TOPOLOGY_INITIAL_VERSION return id def remove_topology(self, topology_id): - self.topology_list.pop(topology_id, None) + self._topology_map.pop(topology_id, None) self.update_version(False) self.update_timestamp() + def is_link_interdomain(self, link, topology): + """ + Check if a link is an interdomain link. + """ + for port in link.ports: + if port["id"] not in self._port_map: + return True + return False + def update_topology(self, data): # likely adding new inter-domain links update_handler = TopologyHandler() topology = update_handler.import_topology_data(data) - self.topology_list[topology.id] = topology + self._topology_map[topology.id] = topology # Nodes. - nodes = topology.get_nodes() + nodes = topology.nodes for node in nodes: - self.topology.remove_node(node.id) + self._topology.remove_node(node.id) # Links. - links = topology.get_links() + links = topology.links for link in links: - if not link.nni: + if not self.is_link_interdomain(link, topology): # print(link.id+";......."+str(link.nni)) - self.topology.remove_link(link.id) + self._topology.remove_link(link.id) for port in link.ports: - self.port_list.pop(port["id"]) + self._port_map.pop(port["id"]) # Check the inter-domain links first. num_interdomain_link = self.inter_domain_check(topology) if num_interdomain_link == 0: - print("Warning: no interdomain links detected!") + self._logger.warning("Warning: no interdomain links detected!") # Nodes. - nodes = topology.get_nodes() - self.topology.add_nodes(nodes) + nodes = topology.nodes + self._topology.add_nodes(nodes) # Links. 
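+        # Only links that stay within this domain are removed here (and their
+        # ports dropped from the port map); links judged inter-domain are kept
+        # and re-evaluated by inter_domain_check() below.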
- links = topology.get_links() - self.topology.add_links(links) + links = topology.links + self._topology.add_links(links) self.update_version(True) self.update_timestamp() def update_version(self, sub: bool): try: - [ver, sub_ver] = self.topology.version.split(".") + [ver, sub_ver] = self._topology.version.split(".") except ValueError: - ver = self.topology.version + ver = self._topology.version sub_ver = "0" - self.topology.version = self.new_version(ver, sub_ver, sub) + self._topology.version = self.new_version(ver, sub_ver, sub) - return self.topology.version + return self._topology.version def new_version(self, ver, sub_ver, sub: bool): if not sub: @@ -173,14 +219,14 @@ def new_version(self, ver, sub_ver, sub: bool): def update_timestamp(self): ct = datetime.datetime.now().isoformat() - self.topology.time_stamp = ct + self._topology.timestamp = ct return ct def inter_domain_check(self, topology): interdomain_port_dict = {} num_interdomain_link = 0 - links = topology.get_links() + links = topology.links link_dict = {} for link in links: link_dict[link.id] = link @@ -189,38 +235,43 @@ def inter_domain_check(self, topology): # ToDo: raise an warning or exception if len(interdomain_port_dict) == 0: - print("interdomain_port_dict==0") + self._logger.info("interdomain_port_dict==0") return False # match any ports in the existing topology for port_id in interdomain_port_dict: # print("interdomain_port:") # print(port_id) - for existing_port, existing_link in self.port_list.items(): + for existing_port, existing_link in self._port_map.items(): # print(existing_port) if port_id == existing_port: # print("Interdomain port:" + port_id) # remove redundant link between two domains - self.topology.remove_link(existing_link.id) + self._topology.remove_link(existing_link.id) num_interdomain_link = +1 - self.port_list[port_id] = interdomain_port_dict[port_id] + self._port_map[port_id] = interdomain_port_dict[port_id] return num_interdomain_link # adjacent matrix of the graph, in jason? def generate_graph(self): graph = nx.Graph() - links = self.topology.links + + if self._topology is None: + self._logger.warning("We do not have a topology yet") + return None + + links = self._topology.links for link in links: inter_domain_link = False ports = link.ports end_nodes = [] for port in ports: - node = self.topology.get_node_by_port(port["id"]) + node = self._topology.get_node_by_port(port["id"]) if node is None: - print( - "This port doesn't belong to any node in the topology, likely a Non-SDX port!" - + port["id"] + self._logger.warning( + f"This port (id: {port['id']}) does not belong to " + f"any node in the topology, likely a Non-SDX port!" ) inter_domain_link = True break @@ -241,7 +292,7 @@ def generate_graph(self): return graph def generate_grenml(self): - self.converter = GrenmlConverter(self.topology) + self.converter = GrenmlConverter(self._topology) return self.converter.read_topology() @@ -255,22 +306,22 @@ def update_private_properties(self): # on performance properties for now def update_link_property(self, link_id, property, value): # 1. update the individual topology - for id, topology in self.topology_list.items(): - links = topology.get_links() + for id, topology in self._topology_map.items(): + links = topology.links for link in links: - print(link.id + ";" + id) + self._logger.info(f"link.id={link.id}; id={id}") if link.id == link_id: setattr(link, property, value) - print("updated the link.") + self._logger.info("updated the link.") # 1.2 need to change the sub_ver of the topology? # 2. 
check on the inter-domain link? # 3. update the interodamin topology - links = self.topology.get_links() + links = self._topology.links for link in links: if link.id == link_id: setattr(link, property, value) - print("updated the link.") + self._logger.info("updated the link.") # 2.2 need to change the sub_ver of the topology? self.update_version(True) @@ -292,7 +343,30 @@ def update_element_property_json(self, data, element, element_id, property, valu sub_ver = "0" data["version"] = self.new_version(ver, sub_ver, True) - data["time_stamp"] = datetime.datetime.now().isoformat() + data["timestamp"] = datetime.datetime.now().isoformat() + + def get_port_by_id(self, port_id: str): + """ + Given port id, returns a Port. + """ + for node in self.get_topology().nodes: + for port in node.ports: + if port.id == port_id: + return port.to_dict() + return None + + def are_two_ports_same_domain(self, port1_id: str, port2_id: str): + """ + Check if two ports are in the same domain. + """ + node1 = self.get_topology().get_node_by_port(port1_id) + node2 = self.get_topology().get_node_by_port(port2_id) + if node1 is None or node2 is None: + return False + + domain1 = self.get_domain_name(node1.id) + domain2 = self.get_domain_name(node2.id) + return domain1 == domain2 def update_node_property(self): pass diff --git a/src/sdx_pce/topology/temanager.py b/src/sdx_pce/topology/temanager.py new file mode 100644 index 00000000..edc08dd9 --- /dev/null +++ b/src/sdx_pce/topology/temanager.py @@ -0,0 +1,936 @@ +import logging +import threading +from itertools import chain +from typing import List, Optional + +import networkx as nx +from networkx.algorithms import approximation as approx +from sdx_datamodel.models.port import Port +from sdx_datamodel.parsing.connectionhandler import ConnectionHandler + +from sdx_pce.models import ( + ConnectionPath, + ConnectionRequest, + ConnectionSolution, + TrafficMatrix, + VlanTag, + VlanTaggedBreakdown, + VlanTaggedBreakdowns, + VlanTaggedPort, +) +from sdx_pce.topology.manager import TopologyManager +from sdx_pce.utils.exceptions import ValidationError + +UNUSED_VLAN = None + + +class TEManager: + """ + TE Manager for connection - topology operations. + + Functions of this class are: + + - generate inputs to the PCE solver + + - converter the solver output. + + - VLAN reservation and unreservation. + """ + + def __init__(self, topology_data): + self.topology_manager = TopologyManager() + + # A lock to safely perform topology operations. + self._topology_lock = threading.Lock() + + self._logger = logging.getLogger(__name__) + + # A {domain, {port, {vlan, in_use}}} mapping. + self._vlan_tags_table = {} + + # Making topology_data optional while investigating + # https://github.com/atlanticwave-sdx/sdx-controller/issues/145. + # + # TODO: a nicer thing to do would be to keep less state around. + # https://github.com/atlanticwave-sdx/pce/issues/122 + if topology_data: + self.topology_manager.add_topology(topology_data) + self.graph = self.generate_graph_te() + self._update_vlan_tags_table( + domain_name=topology_data.get("id"), + port_map=self.topology_manager.get_port_node_map(), + ) + else: + self.graph = None + + def add_topology(self, topology_data: dict): + """ + Add a new topology to TEManager. + + :param topology_data: a dictionary that represents a topology. + """ + self.topology_manager.add_topology(topology_data) + + # Ports appear in two places in the combined topology + # maintained by TopologyManager: attached to each of the + # nodes, and attached to links. 
Here we are using the ports + # attached to links. + self._update_vlan_tags_table( + domain_name=topology_data.get("id"), + port_map=self.topology_manager.get_port_node_map(), + ) + + def update_topology(self, topology_data: dict): + """ + Update an existing topology in TEManager. + + :param topology_data: a dictionary that represents a topology. + """ + self.topology_manager.update_topology(topology_data) + + # TODO: careful here when updating VLAN tags table -- what do + # we do when an in use VLAN tag becomes invalid in the update? + # See https://github.com/atlanticwave-sdx/pce/issues/123 + # + # self._update_vlan_tags_table_from_links( + # domain_name=topology_data.get("id"), + # port_list=self.topology_manager.port_list, + # ) + + def get_topology_map(self) -> dict: + """ + Get {topology_id: topology, ..} map. + """ + return self.topology_manager.get_topology_map() + + def get_port_services_label_range(self, port: dict) -> List[str]: + vlan_range = None + services = port.get("services") + if services and services.get("l2vpn-ptp"): + vlan_range = services.get("l2vpn-ptp").get("vlan_range") + return vlan_range + + def get_port_obj_services_label_range(self, port: Port) -> List[str]: + vlan_range = None + services = port.services + if services and services.l2vpn_ptp: + vlan_range = services.l2vpn_ptp.get("vlan_range") + return vlan_range + + def _update_vlan_tags_table(self, domain_name: str, port_map: dict): + """ + Update VLAN tags table. + """ + self._vlan_tags_table[domain_name] = {} + + for port_id, node in port_map.items(): + # TODO: port here seems to be a dict, not sdx_datamodel.models.Port + for port in node.ports: + # Collect all port IDs in this link. Each link should + # have two ports. + node_port_ids = [x.id for x in node.ports] + + # Do some error checks. + node_port_count = len(node_port_ids) + + if node_port_count < 1: + raise ValidationError( + f"Node has {node_port_count} ports, not greater than 0" + ) + + if port_id not in node_port_ids: + raise ValidationError(f"port {port_id} not in {node_port_ids}") + + # Get the label range for this port: either from the + # port itself (v1), or from the services attached to it (v2). + label_range = self.get_port_obj_services_label_range(port) + if label_range is None: + label_range = port.label_range + + # TODO: why is label_range sometimes None, and what to + # do when that happens? + if label_range is None: + self._logger.info(f"label_range on {port.id} is None") + continue + + # label_range is of the form ['100-200', '1000']; let + # us expand it. Would have been ideal if this was + # already in some parsed form, but it is not, so this + # is a work-around. + all_labels = self._expand_label_range(label_range) + + # Make a map like: `{label1: UNUSED_VLAN, label2: UNUSED_VLAN,...}` + labels_available = {label: UNUSED_VLAN for label in all_labels} + + self._vlan_tags_table[domain_name][port_id] = labels_available + + def _update_vlan_tags_table_from_links(self, domain_name: str, port_map: dict): + """ + Update VLAN tags table. + """ + self._vlan_tags_table[domain_name] = {} + + for port_id, link in port_map.items(): + # TODO: port here seems to be a dict, not sdx_datamodel.models.Port + for port in link.ports: + # Collect all port IDs in this link. Each link should + # have two ports. + link_port_ids = [x["id"] for x in link.ports] + + # Do some error checks. 
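+                # A topology link is expected to join exactly two ports, and
+                # the port we are indexing by must be one of them; anything
+                # else indicates inconsistent topology data.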
+ link_port_count = len(link_port_ids) + + if link_port_count != 2: + raise ValidationError(f"Link has {link_port_count} ports, not 2") + + if port_id not in link_port_ids: + raise ValidationError(f"port {port_id} not in {link_port_ids}") + + # Get the label range for this port: either from the + # port itself (v1), or from the services attached to it (v2). + label_range = self.get_port_services_label_range(port) + if label_range is None: + label_range = port.get("label_range") + + # TODO: why is label_range sometimes None, and what to + # do when that happens? + if label_range is None: + self._logger.info(f"label_range on {port['id']} is None") + continue + + # label_range is of the form ['100-200', '1000']; let + # us expand it. Would have been ideal if this was + # already in some parsed form, but it is not, so this + # is a work-around. + all_labels = self._expand_label_range(label_range) + + # Make a map like: `{label1: UNUSED_VLAN, label2: UNUSED_VLAN,...}` + labels_available = {label: UNUSED_VLAN for label in all_labels} + + self._vlan_tags_table[domain_name][port_id] = labels_available + + def _expand_label_range(self, label_range: []) -> List[int]: + """ + Expand the label range to a list of numbers. + """ + labels = [self._expand_label(label) for label in label_range] + # flatten result and return it. + return list(chain.from_iterable(labels)) + + def _expand_label(self, label) -> List[int]: + + start = stop = 0 + """ + Expand items in label range to a list of numbers. + + Items in label ranges can be of the form "100-200" or "100". + For the first case, we return [100,101,...200]; for the second + case, we return [100]. + """ + if isinstance(label, str): + parts = label.split("-") + start = int(parts[0]) + stop = int(parts[-1]) + 1 + + if isinstance(label, int): + start = label + stop = label + 1 + """ + Items in label ranges can be of the form [100, 200]. + For the first case, we return [100,101,...200]. + """ + if isinstance(label, list): + start = label[0] + stop = label[1] + 1 + + """ + Items in label ranges can not be of the tuple form (100, 200), per JSON schema. + """ + + if start == 0 or stop == 0 or start > stop: + raise ValidationError(f"Invalid label range: {label}") + + return list(range(start, stop)) + + def generate_traffic_matrix(self, connection_request: dict) -> TrafficMatrix: + """ + Generate a Traffic Matrix from the connection request we have. + + A connection request specifies an ingress port, an egress + port, and some other properties. The ports may belong to + different domains. We need to break that request down into a + set of requests, each of them specific to a domain. We call + such a domain-wise set of requests a traffic matrix. 
+ """ + self._logger.info( + f"generate_traffic_matrix: connection_request: {connection_request}" + ) + + request = ConnectionHandler().import_connection_data(connection_request) + + self._logger.info(f"generate_traffic_matrix: decoded request: {request}") + + ingress_port = request.ingress_port + egress_port = request.egress_port + + self._logger.info( + f"generate_traffic_matrix, ports: " + f"ingress_port.id: {ingress_port.id}, " + f"egress_port.id: {egress_port.id}" + ) + + topology = self.topology_manager.get_topology() + + ingress_node = topology.get_node_by_port(ingress_port.id) + egress_node = topology.get_node_by_port(egress_port.id) + + if ingress_node is None: + self._logger.warning( + f"No ingress node was found for ingress port ID '{ingress_port.id}'" + ) + return None + + if egress_node is None: + self._logger.warning( + f"No egress node is found for egress port ID '{egress_port.id}'" + ) + return None + + ingress_nodes = [ + x for x, y in self.graph.nodes(data=True) if y["id"] == ingress_node.id + ] + + egress_nodes = [ + x for x, y in self.graph.nodes(data=True) if y["id"] == egress_node.id + ] + + if len(ingress_nodes) <= 0: + self._logger.warning( + f"No ingress node '{ingress_node.id}' found in the graph" + ) + return None + + if len(egress_nodes) <= 0: + self._logger.warning( + f"No egress node '{egress_node.id}' found in the graph" + ) + return None + + required_bandwidth = request.bandwidth_required or 0 + required_latency = request.latency_required or float("inf") + request_id = request.id + + self._logger.info( + f"Setting required_latency: {required_latency}, " + f"required_bandwidth: {required_bandwidth}" + ) + + request = ConnectionRequest( + source=ingress_nodes[0], + destination=egress_nodes[0], + required_bandwidth=required_bandwidth, + required_latency=required_latency, + ) + + return TrafficMatrix(connection_requests=[request], request_id=request_id) + + def generate_graph_te(self) -> Optional[nx.Graph]: + """ + Return the topology graph that we have. + """ + graph = self.topology_manager.generate_graph() + + if graph is None: + self._logger.warning("No graph could be generated") + return None + + graph = nx.convert_node_labels_to_integers(graph, label_attribute="id") + + # TODO: why is this needed? + self.graph = graph + # print(list(graph.nodes(data=True))) + + return graph + + def graph_node_connectivity(self, source=None, dest=None): + """ + Check that a source and destination node have connectivity. + """ + # TODO: is this method really needed? + return approx.node_connectivity(self.graph, source, dest) + + def requests_connectivity(self, tm: TrafficMatrix) -> bool: + """ + Check that connectivity is possible. + """ + # TODO: consider using filter() and reduce(), maybe? + # TODO: write some tests for this method. + for request in tm.connection_requests: + conn = self.graph_node_connectivity(request.source, request.destination) + self._logger.info( + f"Request connectivity: source {request.source}, " + f"destination: {request.destination} = {conn}" + ) + if conn is False: + return False + + return True + + def get_links_on_path(self, solution: ConnectionSolution) -> list: + """ + Return all the links on a connection solution. + + The result will be a list of dicts, like so: + + .. 
code-block:: + + [{'source': 'urn:ogf:network:sdx:port:zaoxi:A1:1', + 'destination': 'urn:ogf:network:sdx:port:zaoxi:B1:3'}, + {'source': 'urn:ogf:network:sdx:port:zaoxi:B1:1', + 'destination': 'urn:ogf:network:sdx:port:sax:B3:1'}, + {'source': 'urn:ogf:network:sdx:port:sax:B3:3', + 'destination': 'urn:ogf:network:sdx:port:sax:B1:4'}, + {'source': 'urn:ogf:network:sdx:port:sax:B1:1', + 'destination': 'urn:sdx:port:amlight:B1:1'}, + {'source': 'urn:sdx:port:amlight.net:B1:3', + 'destination': 'urn:sdx:port:amlight.net:A1:1'}] + + """ + if solution is None or solution.connection_map is None: + self._logger.warning(f"Can't find paths for {solution}") + return None + + result = [] + + for domain, links in solution.connection_map.items(): + for link in links: + assert isinstance(link, ConnectionPath) + + src_node = self.graph.nodes.get(link.source) + assert src_node is not None + + dst_node = self.graph.nodes.get(link.destination) + assert dst_node is not None + + ports = self._get_ports_by_link(link) + + self._logger.info( + f"get_links_on_path: src_node: {src_node} (#{link.source}), " + f"dst_node: {dst_node} (#{link.destination}), " + f"ports: {ports}" + ) + + if ports: + p1, p2 = ports + result.append({"source": p1["id"], "destination": p2["id"]}) + + return result + + def add_breakdowns_to_connection(self, connection_request: dict, breakdowns: dict): + """ + add breakdowns to connection request for the sdx-controller to process. + """ + connection_request["breakdowns"] = breakdowns + + return connection_request + + def generate_connection_breakdown( + self, solution: ConnectionSolution, connection_request: dict + ) -> dict: + """ + Take a connection solution and generate a breakdown. + """ + if solution is None or solution.connection_map is None: + self._logger.warning(f"Can't find a breakdown for {solution}") + return None + + breakdown = {} + paths = solution.connection_map # p2p for now + + for domain, links in paths.items(): + self._logger.info(f"domain: {domain}, links: {links}") + + current_link_set = [] + + for count, link in enumerate(links): + self._logger.info(f"count: {count}, link: {link}") + + assert isinstance(link, ConnectionPath) + + src_node = self.graph.nodes.get(link.source) + assert src_node is not None + + dst_node = self.graph.nodes.get(link.destination) + assert dst_node is not None + + self._logger.info( + f"source node: {src_node}, destination node: {dst_node}" + ) + + src_domain = self.topology_manager.get_domain_name(src_node["id"]) + dst_domain = self.topology_manager.get_domain_name(dst_node["id"]) + + # TODO: what do we do when a domain can't be + # determined? Can a domain be `None`? + self._logger.info( + f"source domain: {src_domain}, destination domain: {dst_domain}" + ) + + current_link_set.append(link) + current_domain = src_domain + if src_domain == dst_domain: + # current_domain = domain_1 + if count == len(links) - 1: + breakdown[current_domain] = current_link_set.copy() + else: + breakdown[current_domain] = current_link_set.copy() + current_domain = None + current_link_set = [] + + self._logger.info(f"[intermediate] breakdown: {breakdown}") + + # now starting with the ingress_port + first = True + i = 0 + domain_breakdown = {} + + # TODO: using dict to represent a breakdown is dubious, and + # may lead to incorrect results. Dicts are lexically ordered, + # and that may break some assumptions about the order in which + # we form and traverse the breakdown. 
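+        # At this point `breakdown` maps each domain to the ordered list of
+        # ConnectionPath links the solution uses inside that domain, roughly
+        # (with hypothetical IDs):
+        #   {"urn:sdx:topology:amlight.net": [ConnectionPath(...), ...], ...}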
+ + # Note:Extra flag to indicate if the connection request is in the format of TrafficMatrix or not + # If the connection request is in the format of TrafficMatrix, then the ingress_port and egress_port + # are not present in the connection_request + request_format_is_tm = isinstance(connection_request, list) + self._logger.info( + f"connection_requst: {connection_request}; type:{type(request_format_is_tm)}" + ) + same_domain_port_flag = False + if not request_format_is_tm: + connection_request = ( + ConnectionHandler().import_connection_data(connection_request).to_dict() + ) + self._logger.info( + f'connection_requst ingress_port: {connection_request["ingress_port"]["id"]}' + ) + self._logger.info( + f'connection_requst egress_port: {connection_request["egress_port"]["id"]}' + ) + # flag to indicate if the request ingress and egress ports belong to the same domain + same_domain_port_flag = self.topology_manager.are_two_ports_same_domain( + connection_request["ingress_port"]["id"], + connection_request["egress_port"]["id"], + ) + self._logger.info(f"same_domain_user_port_flag: {same_domain_port_flag}") + + # Now generate the breakdown with potential user specified tags + ingress_user_port = None + egress_user_port = None + for domain, links in breakdown.items(): + self._logger.debug( + f"Creating domain_breakdown: domain: {domain}, links: {links}" + ) + segment = {} + + if first: + first = False + # ingress port for this domain is on the first link. + if ( + not request_format_is_tm + and connection_request["ingress_port"]["id"] + not in self.topology_manager.get_port_map() + ): + self._logger.warning( + f"Port {connection_request['ingress_port']['id']} not found in port map, it's a user port" + ) + ingress_port_id = connection_request["ingress_port"]["id"] + ingress_user_port = connection_request["ingress_port"] + ingress_port = self.topology_manager.get_port_by_id(ingress_port_id) + else: + if request_format_is_tm: + ingress_port, _ = self._get_ports_by_link(links[0]) + else: + ingress_port = self.topology_manager.get_port_by_id( + connection_request["ingress_port"]["id"] + ) + + # egress port for this domain is on the last link. 
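+                # The egress port normally comes from the last link of this
+                # domain's segment; the exception below is a user-facing egress
+                # port (absent from the port map) when both request endpoints
+                # sit in the same domain, in which case the user-supplied port
+                # is used instead.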
+ if ( + not request_format_is_tm + and same_domain_port_flag + and connection_request["egress_port"]["id"] + not in self.topology_manager.get_port_map() + ): + self._logger.warning( + f"Port {connection_request['egress_port']['id']} not found in port map, it's a user port" + ) + egress_port_id = connection_request["egress_port"]["id"] + egress_user_port = connection_request["egress_port"] + egress_port = self.topology_manager.get_port_by_id(egress_port_id) + _, next_ingress_port = self._get_ports_by_link(links[-1]) + else: + egress_port, next_ingress_port = self._get_ports_by_link(links[-1]) + if same_domain_port_flag: + egress_port = next_ingress_port + self._logger.info( + f"ingress_port:{ingress_port}, egress_port:{egress_port}, next_ingress_port:{next_ingress_port}" + ) + elif i == len(breakdown) - 1: + ingress_port = next_ingress_port + if ( + not request_format_is_tm + and connection_request["egress_port"]["id"] + not in self.topology_manager.get_port_map() + ): + self._logger.warning( + f"Port {connection_request['egress_port']['id']} not found in port map, it's a user port" + ) + egress_port_id = connection_request["egress_port"]["id"] + egress_user_port = connection_request["egress_port"] + egress_port = self.topology_manager.get_port_by_id(egress_port_id) + else: + _, egress_port = self._get_ports_by_link(links[-1]) + + self._logger.info(f"links[-1]: {links[-1]}") + self._logger.info( + f"ingress_port:{ingress_port}, egress_port:{egress_port}" + ) + else: + ingress_port = next_ingress_port + egress_port, next_ingress_port = self._get_ports_by_link(links[-1]) + + segment = {} + segment["ingress_port"] = ingress_port + segment["egress_port"] = egress_port + + self._logger.info(f"segment for {domain}: {segment}") + + domain_breakdown[domain] = segment.copy() + i = i + 1 + + self._logger.info( + f"generate_connection_breakdown(): domain_breakdown: {domain_breakdown}" + ) + + tagged_breakdown = self._reserve_vlan_breakdown( + domain_breakdown=domain_breakdown, + request_id=solution.request_id, + ingress_user_port=ingress_user_port, + egress_user_port=egress_user_port, + ) + self._logger.info( + f"generate_connection_breakdown(): tagged_breakdown: {tagged_breakdown}" + ) + + # Make tests pass, temporarily. + if tagged_breakdown is None: + return None + + assert isinstance(tagged_breakdown, VlanTaggedBreakdowns) + + # Return a dict containing VLAN-tagged breakdown in the + # expected format. + return tagged_breakdown.to_dict().get("breakdowns") + + def _get_ports_by_link(self, link: ConnectionPath): + """ + Given a link, find the ports associated with it. + + Returns a (Port, Port) tuple. + """ + assert isinstance(link, ConnectionPath) + + node1 = self.graph.nodes[link.source]["id"] + node2 = self.graph.nodes[link.destination]["id"] + + ports = self.topology_manager.get_topology().get_port_by_link(node1, node2) + + # Avoid some possible crashes. + if ports is None: + return None, None + + n1, p1, n2, p2 = ports + + assert n1 == node1 + assert n2 == node2 + + return p1, p2 + + """ + functions for vlan reservation. + + Operations are: + + - obtain the available vlan lists + + - find the vlan continuity on a path if possible. 
+ + - find the vlan translation on the multi-domain path if + continuity not possible + + - reserve the vlan on all the ports on the path + + - unreserve the vlan when the path is removed + """ + + def _reserve_vlan_breakdown( + self, + domain_breakdown: dict, + request_id: str, + ingress_user_port=None, + egress_user_port=None, + ) -> Optional[VlanTaggedBreakdowns]: + """ + Upate domain breakdown with VLAN reservation information. + + This is the top-level function, to be called after + _generate_connection_breakdown_tm(), and should be a private + implementation detail. It should be always called, meaning, + the VLAN tags should be present in the final breakdown, + regardless of whether the connection request explicitly asked + for it or not. + + For this to work, TEManager should maintain a table of VLAN + allocation from each of the domains. The ones that are not in + use can be reserved, and the ones that are not in use anymore + should be returned to the pool by calling unreserve(). + + :param domain_breakdown: per port available vlan range is + pased in datamodel._parse_available_vlans(self, vlan_str) + + :return: Updated domain_breakdown with the VLAN assigned to + each port along a path, or None if failure. + """ + + # # Check if there exist a path of vlan continuity. This is + # # disabled for now, until the simple case is handled. + # selected_vlan = self.find_vlan_on_path(domain_breakdown) + # if selected_vlan is not None: + # return self._reserve_vlan_on_path(domain_breakdown, selected_vlan) + + # if not, assuming vlan translation on the domain border port + + self._logger.info( + f"reserve_vlan_breakdown: domain_breakdown: {domain_breakdown}" + ) + + breakdowns = {} + + # upstream_o_vlan = "" + for domain, segment in domain_breakdown.items(): + # These are topology ports + ingress_port = segment.get("ingress_port") + egress_port = segment.get("egress_port") + + self._logger.debug( + f"VLAN reservation: domain: {domain}, " + f"ingress_port: {ingress_port}, egress_port: {egress_port}" + ) + + if ingress_port is None or egress_port is None: + return None + + ingress_user_port_tag = None + egress_user_port_tag = None + if ( + ingress_user_port is not None + and ingress_port["id"] == ingress_user_port["id"] + ): + ingress_user_port_tag = ingress_user_port.get("vlan_range") + if ( + egress_user_port is not None + and egress_port["id"] == egress_user_port["id"] + ): + egress_user_port_tag = egress_user_port.get("vlan_range") + + ingress_vlan = self._reserve_vlan( + domain, ingress_port, request_id, ingress_user_port_tag + ) + egress_vlan = self._reserve_vlan( + domain, egress_port, request_id, egress_user_port_tag + ) + + ingress_port_id = ingress_port["id"] + egress_port_id = egress_port["id"] + + # TODO: what to do when a port is not in the port map which only has all the ports on links? + # User facing ports need clarification from the custermers. + # For now, we are assuming that the user facing port either (1) provides the vlan + # or (2) uses the OXP vlan if (2.1) not provided or provided (2.2) is not in the vlan range in the topology port. + # And we do't allow user specified vlan on a OXP port. 
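+            # If a user-facing port (one that is not in the port map) did not
+            # get a VLAN of its own, reuse the VLAN chosen for the OXP-facing
+            # port of the same segment.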
+ if ( + ingress_port_id not in self.topology_manager.get_port_map() + and ingress_vlan is None + ): + self._logger.warning( + f"Port {ingress_port_id} not found in port map, it's a user port, by default uses the OXP vlan" + ) + ingress_vlan = egress_vlan + + if ( + egress_port_id not in self.topology_manager.get_port_map() + and egress_vlan is None + ): + self._logger.warning( + f"Port {egress_port_id} not found in port map, it's a user port, by default uses the OXP vlan" + ) + egress_vlan = ingress_vlan + + self._logger.info( + f"VLAN reservation: domain: {domain}, " + f"ingress_vlan: {ingress_vlan}, egress_vlan: {egress_vlan}" + ) + + # if one has empty vlan range, first resume reserved vlans + # in the previous domain, then return false. + if ingress_vlan is None: + self._unreserve_vlan(domain, ingress_port, ingress_vlan) + return None + + if egress_vlan is None: + self._unreserve_vlan(domain, egress_port, egress_vlan) + return None + + # # vlan translation from upstream_o_vlan to i_vlan + # segment["ingress_upstream_vlan"] = upstream_o_vlan + # segment["ingress_vlan"] = ingress_vlan + # segment["egress_vlan"] = egress_vlan + # upstream_o_vlan = egress_vlan + + port_a = VlanTaggedPort( + VlanTag(value=ingress_vlan, tag_type=1), port_id=ingress_port_id + ) + port_z = VlanTaggedPort( + VlanTag(value=egress_vlan, tag_type=1), port_id=egress_port_id + ) + + # Names look like "AMLIGHT_vlan_201_202_Ampath_Tenet". We + # can form the initial part, but where did the + # `Ampath_Tenet` at the end come from? + domain_name = domain.split(":")[-1].split(".")[0].upper() + name = f"{domain_name}_vlan_{ingress_vlan}_{egress_vlan}" + + breakdowns[domain] = VlanTaggedBreakdown( + name=name, + dynamic_backup_path=True, + uni_a=port_a, + uni_z=port_z, + ) + + return VlanTaggedBreakdowns(breakdowns=breakdowns) + + def _find_vlan_on_path(self, path): + """ + Find an unused available VLAN on path. + + Finds a VLAN that's not being used at the moment on a provided + path. Returns an available VLAN if possible, None if none are + available on the submitted path. + + output: vlan_tag string or None + """ + + # TODO: implement this + # https://github.com/atlanticwave-sdx/pce/issues/126 + + assert False, "Not implemented" + + def _reserve_vlan_on_path(self, domain_breakdown, selected_vlan): + # TODO: what is the difference between reserve_vlan and + # reserve_vlan_on_path? + + # TODO: implement this + # https://github.com/atlanticwave-sdx/pce/issues/126 + + # return domain_breakdown + assert False, "Not implemented" + + def _reserve_vlan(self, domain: str, port: dict, request_id: str, tag=None): + # with self._topology_lock: + # pass + + port_id = port["id"] + self._logger.debug(f"reserve_vlan domain: {domain} port_id: {port_id}") + + if port_id is None: + return None + + # Look up available VLAN tags by domain and port ID. 
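+        # The table is nested as {domain: {port_id: {vlan: assignment}}},
+        # where an assignment is either UNUSED_VLAN (None) or the request_id
+        # that currently holds the tag; for illustration (hypothetical IDs):
+        #   {"urn:sdx:topology:sax.net": {"urn:sdx:port:sax:B1:1": {100: None}}}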
+ # self._logger.debug(f"vlan tags table: {self._vlan_tags_table}") + domain_table = self._vlan_tags_table.get(domain) + # self._logger.debug(f"domain vlan table: {domain} domain_table: {domain_table}") + + if domain_table is None: + self._logger.warning(f"reserve_vlan domain: {domain} entry: {domain_table}") + return None + + vlan_table = domain_table.get(port_id) + + self._logger.debug(f"reserve_vlan domain: {domain} vlan_table: {vlan_table}") + + # TODO: figure out when vlan_table can be None + if vlan_table is None: + self._logger.warning( + f"Can't find a mapping for domain:{domain} port:{port_id}" + ) + return None + + available_tag = None + + if tag is None: + # Find the first available VLAN tag from the table. + for vlan_tag, vlan_usage in vlan_table.items(): + if vlan_usage is UNUSED_VLAN: + available_tag = vlan_tag + else: + if vlan_table[tag] is UNUSED_VLAN: + available_tag = tag + else: + return None + + # mark the tag as in-use. + vlan_table[available_tag] = request_id + + self._logger.debug( + f"reserve_vlan domain {domain}, after reservation: " + f"vlan_table: {vlan_table}, available_tag: {available_tag}" + ) + + return available_tag + + def unreserve_vlan(self, request_id: str): + """ + Return previously reserved VLANs back to the pool. + """ + for domain, port_table in self._vlan_tags_table.items(): + for port, vlan_table in port_table.items(): + for vlan, assignment in vlan_table.items(): + if assignment == request_id: + vlan_table[vlan] = UNUSED_VLAN + + # to be called by delete_connection() + def _unreserve_vlan_breakdown(self, break_down): + # TODO: implement this. + # https://github.com/atlanticwave-sdx/pce/issues/127 + # with self._topology_lock: + # pass + assert False, "Not implemented" + + def _unreserve_vlan(self, domain: str, port: dict, tag=None): + """ + Mark a VLAN tag as not in use. + """ + # TODO: implement this. + # https://github.com/atlanticwave-sdx/pce/issues/127 + + # with self._topology_lock: + # pass + assert False, "Not implemented" + + def _print_vlan_tags_table(self): + import pprint + + self._logger.info("------ VLAN TAGS TABLE -------") + self._logger.info(pprint.pformat(self._vlan_tags_table)) + self._logger.info("------------------------------") diff --git a/src/sdx/pce/topology/__init__.py b/src/sdx_pce/utils/__init__.py similarity index 100% rename from src/sdx/pce/topology/__init__.py rename to src/sdx_pce/utils/__init__.py diff --git a/src/sdx/pce/utils/constants.py b/src/sdx_pce/utils/constants.py similarity index 94% rename from src/sdx/pce/utils/constants.py rename to src/sdx_pce/utils/constants.py index c7add914..206967e7 100644 --- a/src/sdx/pce/utils/constants.py +++ b/src/sdx_pce/utils/constants.py @@ -25,3 +25,5 @@ class Constants: MAX_L_LAT = 25 ALPHA = 10 ^ 6 + + DEFAULT_OWNER = "AmLight" diff --git a/src/sdx_pce/utils/exceptions.py b/src/sdx_pce/utils/exceptions.py new file mode 100644 index 00000000..c08a3c24 --- /dev/null +++ b/src/sdx_pce/utils/exceptions.py @@ -0,0 +1,7 @@ +class ValidationError(Exception): + """ + A custom exception class to indicate errors. 
+ """ + + def __init__(self, message): + super().__init__(message) diff --git a/src/sdx/pce/utils/functions.py b/src/sdx_pce/utils/functions.py similarity index 97% rename from src/sdx/pce/utils/functions.py rename to src/sdx_pce/utils/functions.py index 7b04a816..aac39ca3 100644 --- a/src/sdx/pce/utils/functions.py +++ b/src/sdx_pce/utils/functions.py @@ -10,7 +10,7 @@ from networkx.algorithms import approximation as approx -from sdx.pce.utils.constants import Constants +from sdx_pce.utils.constants import Constants class GraphFunction: @@ -112,7 +112,7 @@ def dijnew(graph, start_node, end_node): current_node = predecessor[current_node] except KeyError: print("Path not reachable") - break + return [] path.insert(0, start_node) if ( shortest_distance[end_node] != infinity @@ -167,7 +167,7 @@ def backup_path(graph, start_node, end_node): continue print("The back up path: ") - dijnew(graph_original, backupstart_node, backupend_node) + return dijnew(graph_original, backupstart_node, backupend_node) def create_unvisited_list(link_list): diff --git a/src/sdx/pce/utils/graphviz.py b/src/sdx_pce/utils/graphviz.py similarity index 97% rename from src/sdx/pce/utils/graphviz.py rename to src/sdx_pce/utils/graphviz.py index 49be4e53..517c957a 100644 --- a/src/sdx/pce/utils/graphviz.py +++ b/src/sdx_pce/utils/graphviz.py @@ -17,7 +17,6 @@ graphviz C library, so installing the latter is a little more work. """ -import json import re from pathlib import Path from typing import Union @@ -25,7 +24,7 @@ import networkx as nx from networkx.algorithms import approximation as approx -from sdx.pce.utils.constants import Constants +from sdx_pce.utils.constants import Constants __all__ = ["can_read_dot_file", "read_dot_file"] diff --git a/src/sdx/pce/utils/random_connection_generator.py b/src/sdx_pce/utils/random_connection_generator.py similarity index 87% rename from src/sdx/pce/utils/random_connection_generator.py rename to src/sdx_pce/utils/random_connection_generator.py index 15aedb80..3b89d123 100644 --- a/src/sdx/pce/utils/random_connection_generator.py +++ b/src/sdx_pce/utils/random_connection_generator.py @@ -1,8 +1,6 @@ -import json - import numpy as np -from sdx.pce.models import ConnectionRequest, TrafficMatrix +from sdx_pce.models import ConnectionRequest, TrafficMatrix class RandomConnectionGenerator: @@ -24,7 +22,7 @@ def generate(self, querynum, l_bw, u_bw, l_lat, u_lat, seed=2022) -> TrafficMatr format (source, destination, bandwidth, latency). 
""" np.random.seed(seed) - traffic_matrix = TrafficMatrix(connection_requests=[]) + connection_requests = [] bw = self.lognormal((l_bw + u_bw) / 2.0, 1, querynum) if querynum <= self.num_nodes: @@ -43,7 +41,7 @@ def generate(self, querynum, l_bw, u_bw, l_lat, u_lat, seed=2022) -> TrafficMatr required_latency=required_latency, ) - traffic_matrix.connection_requests.append(request) + connection_requests.append(request) else: for i in range(querynum): source = np.random.randint(0, self.num_nodes) @@ -61,9 +59,13 @@ def generate(self, querynum, l_bw, u_bw, l_lat, u_lat, seed=2022) -> TrafficMatr required_latency=required_latency, ) - traffic_matrix.connection_requests.append(request) + connection_requests.append(request) + + request_id = f"request-{np.random.randint(1, 100)}" - return traffic_matrix + return TrafficMatrix( + connection_requests=connection_requests, request_id=request_id + ) def lognormal(self, mu, sigma, size): normal_std = 0.5 diff --git a/src/sdx/pce/utils/random_topology_generator.py b/src/sdx_pce/utils/random_topology_generator.py similarity index 97% rename from src/sdx/pce/utils/random_topology_generator.py rename to src/sdx_pce/utils/random_topology_generator.py index d4bb1eb9..46226e64 100644 --- a/src/sdx/pce/utils/random_topology_generator.py +++ b/src/sdx_pce/utils/random_topology_generator.py @@ -12,8 +12,8 @@ from networkx.algorithms import approximation as approx from networkx.generators.random_graphs import erdos_renyi_graph -from sdx.pce.utils.constants import Constants -from sdx.pce.utils.functions import GraphFunction +from sdx_pce.utils.constants import Constants +from sdx_pce.utils.functions import GraphFunction class RandomTopologyGenerator: diff --git a/tests/__init__.py b/tests/__init__.py index 0ce93dcd..ccf66241 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -4,31 +4,42 @@ try: # Use stdlib modules with Python > 3.8. from importlib.resources import files -except: +except ImportError: # Use compatibility library with Python 3.8. from importlib_resources import files class TestData: - # Some data files are in src/sdx/pce/data. - PACKAGE_DATA_DIR = files("sdx.pce") / "data" - - TOPOLOGY_FILE_ZAOXI = PACKAGE_DATA_DIR / "topologies" / "zaoxi.json" - TOPOLOGY_FILE_SAX = PACKAGE_DATA_DIR / "topologies" / "sax.json" - TOPOLOGY_FILE_AMLIGHT = PACKAGE_DATA_DIR / "topologies" / "amlight.json" - CONNECTION_REQ = PACKAGE_DATA_DIR / "requests" / "test_request.json" + # Some data files are in src/sdx_datamodel/data. + TOPOLOGY_DIR = files("sdx_datamodel") / "data" / "topologies" + TOPOLOGY_FILE_ZAOXI = TOPOLOGY_DIR / "zaoxi.json" + TOPOLOGY_FILE_SAX = TOPOLOGY_DIR / "sax.json" + TOPOLOGY_FILE_AMLIGHT = TOPOLOGY_DIR / "amlight.json" + TOPOLOGY_FILE_SDX = TOPOLOGY_DIR / "sdx.json" + TOPOLOGY_FILE_AMLIGHT_USER_PORT = TOPOLOGY_DIR / "amlight_user_port.json" + + REQUESTS_DIR = files("sdx_datamodel") / "data" / "requests" + CONNECTION_REQ = REQUESTS_DIR / "test_request.json" + CONNECTION_REQ_AMLIGHT_ZAOXI_USER_PORT_v2 = ( + REQUESTS_DIR / "test_request-amlight_zaoxi-p2p-v2.json" + ) # Write test output files in OS temporary directory. 
TEST_OUTPUT_DIR = pathlib.Path(tempfile.gettempdir()) - TEST_OUTPUT_IMG_AMLIGHT = TEST_OUTPUT_DIR / "sdx-pce-amlight.png" - TEST_OUTPUT_IMG_SAX = TEST_OUTPUT_DIR / "sdx-pce-sax.png" - TEST_OUTPUT_IMG_ZAOXI = TEST_OUTPUT_DIR / "sdx-pce-zaoxi.png" + TEST_OUTPUT_IMG_AMLIGHT = TEST_OUTPUT_DIR / "sdx_pce-amlight.png" + TEST_OUTPUT_IMG_SAX = TEST_OUTPUT_DIR / "sdx_pce-sax.png" + TEST_OUTPUT_IMG_ZAOXI = TEST_OUTPUT_DIR / "sdx_pce-zaoxi.png" # Other test data files. TEST_DATA_DIR = pathlib.Path(__file__).parent / "data" - TOPOLOGY_FILE_SDX = TEST_DATA_DIR / "sdx.json" CONNECTION_REQ_AMLIGHT = TEST_DATA_DIR / "test_request_amlight.json" + CONNECTION_REQ_AMLIGHT_USER_PORT = ( + TEST_DATA_DIR / "test_request_amlight_user_port.json" + ) + CONNECTION_REQ_AMLIGHT_ZAOXI_USER_PORT = ( + TEST_DATA_DIR / "test_request_amlight_zaoxi_user_port.json" + ) TOPOLOGY_FILE_SAX_2 = TEST_DATA_DIR / "sax-2.json" CONNECTION_REQ_FILE_SAX_2_INVALID = TEST_DATA_DIR / "sax-2-request-invalid.json" diff --git a/tests/data/sax-2-request-invalid.json b/tests/data/sax-2-request-invalid.json index 253915cb..07f818b6 100644 --- a/tests/data/sax-2-request-invalid.json +++ b/tests/data/sax-2-request-invalid.json @@ -22,6 +22,6 @@ "quantity": 0, "start_time": "2000-01-23T04:56:07+00:00", "status": "success", - "time_stamp": "2000-01-23T04:56:07+00:00", + "timestamp": "2000-01-23T04:56:07+00:00", "version": 1 } diff --git a/tests/data/sax-2-request-valid.json b/tests/data/sax-2-request-valid.json index e161316f..2126c29d 100644 --- a/tests/data/sax-2-request-valid.json +++ b/tests/data/sax-2-request-valid.json @@ -1,9 +1,9 @@ { "complete": false, "egress_port": { - "id": "urn:ogf:network:sdx:port:sax:B1:1", + "id": "urn:sdx:port:sax:B1:1", "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "10", "state": "enabled", "status": "up" @@ -11,9 +11,9 @@ "end_time": "2000-01-23T04:56:07+00:00", "id": "id", "ingress_port": { - "id": "urn:ogf:network:sdx:port:sax:B2:1", + "id": "urn:sdx:port:sax:B2:1", "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "9", "state": "enabled", "status": "up" @@ -22,6 +22,6 @@ "quantity": 0, "start_time": "2000-01-23T04:56:07+00:00", "status": "success", - "time_stamp": "2000-01-23T04:56:07+00:00", + "timestamp": "2000-01-23T04:56:07+00:00", "version": 1 } diff --git a/tests/data/sax-2.json b/tests/data/sax-2.json index c0539212..0258df86 100644 --- a/tests/data/sax-2.json +++ b/tests/data/sax-2.json @@ -1,5 +1,5 @@ { - "id": "urn:ogf:network:sdx", + "id": "urn:sdx", "name": "SAX-OXP", "domain_service": { "monitoring_capability": null, @@ -10,20 +10,21 @@ "vendor": null }, "version": "0.0", - "time_stamp": "2023-05-04T21:01:51.954870", + "timestamp": "2023-05-04T21:01:51.954870", "nodes": [ { - "id": "urn:ogf:network:sdx:node:sax:B1", + "id": "urn:sdx:node:sax:B1", "name": "sax:Novi01", "short_name": "B1", "location": { "address": "SaoPaulo", "latitude": -46.650271781410524, - "longitude": -23.5311561958366 + "longitude": -23.5311561958366, + "iso3166_2_lvl4": "BR-SP" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B1:1", + "id": "urn:sdx:port:sax:B1:1", "name": "Novi01:1", "short_name": null, "node": "B1:1", @@ -36,7 +37,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B1:2", + "id": "urn:sdx:port:sax:B1:2", "name": "Novi01:2", "short_name": null, "node": "B1:2", @@ -49,7 +50,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B1:3", + 
"id": "urn:sdx:port:sax:B1:3", "name": "Novi01:3", "short_name": null, "node": "B1:3", @@ -62,7 +63,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B1:4", + "id": "urn:sdx:port:sax:B1:4", "name": "Novi01:3", "short_name": null, "node": "B1:4", @@ -78,17 +79,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:B2", + "id": "urn:sdx:node:sax:B2", "name": "sax:Novi02", "short_name": "B2", "location": { "address": "PanamaCity", "latitude": -79.4947050137491, - "longitude": 8.993040465928525 + "longitude": 8.993040465928525, + "iso3166_2_lvl4": "US-PN" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B2:1", + "id": "urn:sdx:port:sax:B2:1", "name": "Novi02:1", "short_name": null, "node": "B2:1", @@ -101,7 +103,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B2:2", + "id": "urn:sdx:port:sax:B2:2", "name": "Novi02:2", "short_name": null, "node": "B2:2", @@ -114,7 +116,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B2:3", + "id": "urn:sdx:port:sax:B2:3", "name": "Novi02:3", "short_name": null, "node": "B2:3", @@ -127,7 +129,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B2:4", + "id": "urn:sdx:port:sax:B2:4", "name": "Novi02:4", "short_name": null, "node": "B2:3", @@ -143,17 +145,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:B3", + "id": "urn:sdx:node:sax:B3", "name": "sax:Novi03", "short_name": "B3", "location": { "address": "Fortaleza", "latitude": -38.52443289673026, - "longitude": -3.73163824920348 + "longitude": -3.73163824920348, + "iso3166_2_lvl4": "BR-FR" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:1", + "id": "urn:sdx:port:sax:B3:1", "name": "Novi02:3", "short_name": null, "node": "B3:1", @@ -166,7 +169,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B3:2", + "id": "urn:sdx:port:sax:B3:2", "name": "Novi02:3", "short_name": null, "node": "B3:2", @@ -179,7 +182,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B3:3", + "id": "urn:sdx:port:sax:B3:3", "name": "Novi03:3", "short_name": null, "node": "B3:3", @@ -195,17 +198,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:A1", + "id": "urn:sdx:node:sax:A1", "name": "sax:Novi100", "short_name": "A1", "location": { "address": "Santiago", "latitude": -70.64634765264213, - "longitude": -33.4507049233331 + "longitude": -33.4507049233331, + "iso3166_2_lvl4": "CL-SN" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:A1:1", + "id": "urn:sdx:port:sax:A1:1", "name": "Novi100:1", "short_name": null, "node": "A1:1", @@ -218,7 +222,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:A1:2", + "id": "urn:sdx:port:sax:A1:2", "name": "Novi100:2", "short_name": null, "node": "A1:2", @@ -236,15 +240,15 @@ ], "links": [ { - "id": "urn:ogf:network:sdx:link:sax:B1-B2", + "id": "urn:sdx:link:sax:B1-B2", "name": "sax:B1-B2", "short_name": "SaoPaulo-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B1:2", + "id": "urn:sdx:port:sax:B1:2", "name": "Novi01:2", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:2", "label_range": [ "100-200", @@ -253,13 +257,13 @@ "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:2", + "id": "urn:sdx:port:sax:B2:2", "label_range": [ "100-200", "10001" ], "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:2", 
"status": "up" } @@ -272,34 +276,34 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:Panama-Fortaleza", + "id": "urn:sdx:link:sax:Panama-Fortaleza", "name": "sax:Panama-Fortaleza", "short_name": "Panama-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:2", + "id": "urn:sdx:port:sax:B3:2", "label_range": [ "100-200", "10001" ], "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", + "node": "urn:sdx:node:sax:B3", "short_name": "B3:2", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:4", + "id": "urn:sdx:port:sax:B2:4", "label_range": [ "100-200", "10001" ], "name": "Novi02:4", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:3", "status": "up" } @@ -312,30 +316,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:SanPaolo-Fortaleza", + "id": "urn:sdx:link:sax:SanPaolo-Fortaleza", "name": "nni:SanPaolo-Fortaleza", "short_name": "BocaRaton-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:3", + "id": "urn:sdx:port:sax:B3:3", "label_range": [ "100-200", "10001" ], "name": "Novi03:3", - "node": "urn:ogf:network:sdx:node:sax:B3", + "node": "urn:sdx:node:sax:B3", "short_name": "B3:3", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B1:4", + "id": "urn:sdx:port:sax:B1:4", "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:4", "label_range": [ "100-200", @@ -352,30 +356,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:A1-B1", + "id": "urn:sdx:link:sax:A1-B1", "name": "sax:A1-B1", "short_name": "redclara-SaoPaulo", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:A1:1", + "id": "urn:sdx:port:sax:A1:1", "label_range": [ "100-200", "1000" ], "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:sax:A1", + "node": "urn:sdx:node:sax:A1", "short_name": "A1:1", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B1:3", + "id": "urn:sdx:port:sax:B1:3", "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:3", "label_range": [ "100-200", @@ -392,34 +396,34 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:A1-B2", + "id": "urn:sdx:link:sax:A1-B2", "name": "sax:A1-B2", "short_name": "redclara-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:A1:2", + "id": "urn:sdx:port:sax:A1:2", "label_range": [ "100-200", "1000" ], "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:sax:A1", + "node": "urn:sdx:node:sax:A1", "short_name": "A1:2", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:3", + "id": "urn:sdx:port:sax:B2:3", "label_range": [ "100-200", "10001" ], "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:3", "status": "up" } @@ -432,11 +436,11 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": 
"urn:ogf:network:sdx:link:nni:Miami-Sanpaolo", + "id": "urn:sdx:link:nni:Miami-Sanpaolo", "name": "nni:Miami-Sanpaolo", "short_name": "Miami-Sanpaolo", "nni": null, @@ -453,9 +457,9 @@ "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B1:1", + "id": "urn:sdx:port:sax:B1:1", "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:1", "label_range": [ "100-200", @@ -472,11 +476,11 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:nni:BocaRaton-Fortaleza", + "id": "urn:sdx:link:nni:BocaRaton-Fortaleza", "name": "nni:BocaRaton-Fortaleza", "short_name": "BocaRaton-Fortaleza", "nni": true, @@ -493,13 +497,13 @@ "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:1", + "id": "urn:sdx:port:sax:B2:1", "label_range": [ "100-200", "1000" ], "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:1", "status": "up" } @@ -512,30 +516,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:nni:Fortaleza-Sangano", + "id": "urn:sdx:link:nni:Fortaleza-Sangano", "name": "nni:Fortaleza-Sangano", "short_name": "Fortaleza-Sangano", "nni": true, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:1", + "id": "urn:sdx:port:sax:B3:1", "label_range": [ "100-200", "1000" ], "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", + "node": "urn:sdx:node:sax:B3", "short_name": "B3:1", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:1", + "id": "urn:sdx:port:zaoxi:B1:1", "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", + "node": "urn:sdx:node:zaoxi:B1", "short_name": "B1:1", "label_range": [ "100-200", @@ -552,7 +556,7 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null } ], diff --git a/tests/data/sdx.json b/tests/data/sdx.json index c791d753..5bbdedd0 100644 --- a/tests/data/sdx.json +++ b/tests/data/sdx.json @@ -1,5 +1,5 @@ { - "id": "urn:ogf:network:sdx", + "id": "urn:sdx", "name": "AmLight-OXP", "domain_service": { "monitoring_capability": null, @@ -10,7 +10,7 @@ "vendor": null }, "version": "2.1", - "time_stamp": "1970-01-01T00:00:00.000000", + "timestamp": "1970-01-01T00:00:00.000000", "nodes": [ { "id": "urn:sdx:node:amlight.net:B1", @@ -19,7 +19,8 @@ "location": { "address": "Miami", "latitude": -80.37676058477908, - "longitude": 25.75633040531146 + "longitude": 25.75633040531146, + "iso3166_2_lvl4": "US-MIA" }, "ports": [ { @@ -71,7 +72,8 @@ "location": { "address": "BocaRaton", "latitude": -80.10225977485742, - "longitude": 26.381437356374075 + "longitude": 26.381437356374075, + "iso3166_2_lvl4": "US-BC" }, "ports": [ { @@ -123,7 +125,8 @@ "location": { "address": "redclara", "latitude": -81.66666016473143, - "longitude": 30.34943181039702 + "longitude": 30.34943181039702, + "iso3166_2_lvl4": "US-RC" }, "ports": [ { @@ -156,17 +159,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:B1", + "id": "urn:sdx:node:sax:B1", "name": "sax:Novi01", "short_name": "B1", "location": { "address": "SaoPaulo", "latitude": -46.650271781410524, - "longitude": -23.5311561958366 + "longitude": -23.5311561958366, + "iso3166_2_lvl4": "BR-SP" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B1:1", + "id": 
"urn:sdx:port:sax:B1:1", "name": "Novi01:1", "short_name": null, "node": "B1:1", @@ -179,7 +183,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B1:2", + "id": "urn:sdx:port:sax:B1:2", "name": "Novi01:2", "short_name": null, "node": "B1:2", @@ -192,7 +196,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B1:3", + "id": "urn:sdx:port:sax:B1:3", "name": "Novi01:3", "short_name": null, "node": "B1:3", @@ -205,7 +209,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B1:4", + "id": "urn:sdx:port:sax:B1:4", "name": "Novi01:3", "short_name": null, "node": "B1:4", @@ -221,17 +225,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:B2", + "id": "urn:sdx:node:sax:B2", "name": "sax:Novi02", "short_name": "B2", "location": { "address": "PanamaCity", "latitude": -79.4947050137491, - "longitude": 8.993040465928525 + "longitude": 8.993040465928525, + "iso3166_2_lvl4": "US-PN" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B2:1", + "id": "urn:sdx:port:sax:B2:1", "name": "Novi02:1", "short_name": null, "node": "B2:1", @@ -244,7 +249,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B2:2", + "id": "urn:sdx:port:sax:B2:2", "name": "Novi02:2", "short_name": null, "node": "B2:2", @@ -257,7 +262,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B2:3", + "id": "urn:sdx:port:sax:B2:3", "name": "Novi02:3", "short_name": null, "node": "B2:3", @@ -270,7 +275,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B2:4", + "id": "urn:sdx:port:sax:B2:4", "name": "Novi02:4", "short_name": null, "node": "B2:3", @@ -286,17 +291,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:B3", + "id": "urn:sdx:node:sax:B3", "name": "sax:Novi03", "short_name": "B3", "location": { "address": "Fortaleza", "latitude": -38.52443289673026, - "longitude": -3.73163824920348 + "longitude": -3.73163824920348, + "iso3166_2_lvl4": "BR-FR" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:1", + "id": "urn:sdx:port:sax:B3:1", "name": "Novi02:3", "short_name": null, "node": "B3:1", @@ -309,7 +315,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B3:2", + "id": "urn:sdx:port:sax:B3:2", "name": "Novi02:3", "short_name": null, "node": "B3:2", @@ -322,7 +328,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:B3:3", + "id": "urn:sdx:port:sax:B3:3", "name": "Novi03:3", "short_name": null, "node": "B3:3", @@ -338,17 +344,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:sax:A1", + "id": "urn:sdx:node:sax:A1", "name": "sax:Novi100", "short_name": "A1", "location": { "address": "Santiago", "latitude": -70.64634765264213, - "longitude": -33.4507049233331 + "longitude": -33.4507049233331, + "iso3166_2_lvl4": "CL-SN" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:A1:1", + "id": "urn:sdx:port:sax:A1:1", "name": "Novi100:1", "short_name": null, "node": "A1:1", @@ -361,7 +368,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:sax:A1:2", + "id": "urn:sdx:port:sax:A1:2", "name": "Novi100:2", "short_name": null, "node": "A1:2", @@ -377,17 +384,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:zaoxi:B1", + "id": "urn:sdx:node:zaoxi:B1", "name": "zaoxi:Novi01", "short_name": "B1", "location": { "address": "Sangano", "latitude": 13.216709879405311, - "longitude": -9.533459658700743 + "longitude": -9.533459658700743, + "iso3166_2_lvl4": "IT-SN" 
}, "ports": [ { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:1", + "id": "urn:sdx:port:zaoxi:B1:1", "name": "Novi01:1", "short_name": null, "node": "B1:1", @@ -400,7 +408,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:2", + "id": "urn:sdx:port:zaoxi:B1:2", "name": "Novi01:2", "short_name": null, "node": "B1:2", @@ -413,7 +421,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:3", + "id": "urn:sdx:port:zaoxi:B1:3", "name": "Novi01:3", "short_name": null, "node": "B1:3", @@ -429,17 +437,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:zaoxi:B2", + "id": "urn:sdx:node:zaoxi:B2", "name": "zaoxi:Novi02", "short_name": "B2", "location": { "address": "CapeTown", "latitude": -38.52443289673026, - "longitude": -3.73163824920348 + "longitude": -3.73163824920348, + "iso3166_2_lvl4": "ZA-WC" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:1", + "id": "urn:sdx:port:zaoxi:B2:1", "name": "Novi02:1", "short_name": null, "node": "B2:1", @@ -452,7 +461,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:2", + "id": "urn:sdx:port:zaoxi:B2:2", "name": "Novi02:2", "short_name": null, "node": "B2:2", @@ -465,7 +474,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:3", + "id": "urn:sdx:port:zaoxi:B2:3", "name": "Novi02:3", "short_name": null, "node": "B2:3", @@ -481,17 +490,18 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:node:zaoxi:A1", + "id": "urn:sdx:node:zaoxi:A1", "name": "zaoxi:Novi100", "short_name": "A1", "location": { "address": "Karoo", "latitude": 22.541224555821298, - "longitude": -32.3632301851245 + "longitude": -32.3632301851245, + "iso3166_2_lvl4": "ZA-KR" }, "ports": [ { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:1", + "id": "urn:sdx:port:zaoxi:A1:1", "name": "Novi100:1", "short_name": null, "node": "A1:1", @@ -504,7 +514,7 @@ "private_attributes": null }, { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:2", + "id": "urn:sdx:port:zaoxi:A1:2", "name": "Novi100:2", "short_name": null, "node": "A1:2", @@ -522,7 +532,7 @@ ], "links": [ { - "id": "urn:ogf:network:sdx:link:amlight:B1-B2", + "id": "urn:sdx:link:amlight:B1-B2", "name": "amlight:B1-B2", "short_name": "Miami-BocaRaton", "nni": null, @@ -558,11 +568,11 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:amlight:A1-B1", + "id": "urn:sdx:link:amlight:A1-B1", "name": "amlight:A1-B1", "short_name": "redclara-miami", "nni": null, @@ -598,11 +608,11 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:amlight:A1-B2", + "id": "urn:sdx:link:amlight:A1-B2", "name": "amlight:A1-B2", "short_name": "redclara-BocaRaton", "nni": null, @@ -638,19 +648,19 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:B1-B2", + "id": "urn:sdx:link:sax:B1-B2", "name": "sax:B1-B2", "short_name": "SaoPaulo-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B1:2", + "id": "urn:sdx:port:sax:B1:2", "name": "Novi01:2", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:2", "label_range": [ "100-200", @@ -659,13 +669,13 @@ "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:2", + 
"id": "urn:sdx:port:sax:B2:2", "label_range": [ "100-200", "10001" ], "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:2", "status": "up" } @@ -678,34 +688,34 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:Panama-Fortaleza", + "id": "urn:sdx:link:sax:Panama-Fortaleza", "name": "sax:Panama-Fortaleza", "short_name": "Panama-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:2", + "id": "urn:sdx:port:sax:B3:2", "label_range": [ "100-200", "10001" ], "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", + "node": "urn:sdx:node:sax:B3", "short_name": "B3:2", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:4", + "id": "urn:sdx:port:sax:B2:4", "label_range": [ "100-200", "10001" ], "name": "Novi02:4", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:3", "status": "up" } @@ -718,30 +728,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:SanPaolo-Fortaleza", + "id": "urn:sdx:link:sax:SanPaolo-Fortaleza", "name": "nni:SanPaolo-Fortaleza", "short_name": "BocaRaton-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:3", + "id": "urn:sdx:port:sax:B3:3", "label_range": [ "100-200", "10001" ], "name": "Novi03:3", - "node": "urn:ogf:network:sdx:node:sax:B3", + "node": "urn:sdx:node:sax:B3", "short_name": "B3:3", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B1:4", + "id": "urn:sdx:port:sax:B1:4", "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:4", "label_range": [ "100-200", @@ -758,30 +768,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:A1-B1", + "id": "urn:sdx:link:sax:A1-B1", "name": "sax:A1-B1", "short_name": "redclara-SaoPaulo", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:A1:1", + "id": "urn:sdx:port:sax:A1:1", "label_range": [ "100-200", "1000" ], "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:sax:A1", + "node": "urn:sdx:node:sax:A1", "short_name": "A1:1", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B1:3", + "id": "urn:sdx:port:sax:B1:3", "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:3", "label_range": [ "100-200", @@ -798,34 +808,34 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:sax:A1-B2", + "id": "urn:sdx:link:sax:A1-B2", "name": "sax:A1-B2", "short_name": "redclara-Fortaleza", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:A1:2", + "id": "urn:sdx:port:sax:A1:2", "label_range": [ "100-200", "1000" ], "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:sax:A1", + "node": "urn:sdx:node:sax:A1", "short_name": "A1:2", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:3", + "id": "urn:sdx:port:sax:B2:3", "label_range": [ "100-200", "10001" ], "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:3", "status": "up" } @@ -838,11 
+848,11 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:nni:Miami-Sanpaolo", + "id": "urn:sdx:link:nni:Miami-Sanpaolo", "name": "nni:Miami-Sanpaolo", "short_name": "Miami-Sanpaolo", "nni": true, @@ -859,9 +869,9 @@ "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B1:1", + "id": "urn:sdx:port:sax:B1:1", "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:sax:B1", + "node": "urn:sdx:node:sax:B1", "short_name": "B1:1", "label_range": [ "100-200", @@ -878,11 +888,11 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:nni:BocaRaton-Fortaleza", + "id": "urn:sdx:link:nni:BocaRaton-Fortaleza", "name": "nni:BocaRaton-Fortaleza", "short_name": "BocaRaton-Fortaleza", "nni": true, @@ -899,13 +909,13 @@ "status": "up" }, { - "id": "urn:ogf:network:sdx:port:sax:B2:1", + "id": "urn:sdx:port:sax:B2:1", "label_range": [ "100-200", "1000" ], "name": "Novi02:1", - "node": "urn:ogf:network:sdx:node:sax:B2", + "node": "urn:sdx:node:sax:B2", "short_name": "B2:1", "status": "up" } @@ -918,29 +928,29 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:zaoxi:B1-B2", + "id": "urn:sdx:link:zaoxi:B1-B2", "name": "zaoxi:B1-B2", "short_name": "Sangano-Capetown", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:2", + "id": "urn:sdx:port:zaoxi:B1:2", "name": "Novi01:2", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", + "node": "urn:sdx:node:zaoxi:B1", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:2", + "id": "urn:sdx:port:zaoxi:B2:2", "label_range": [ "100-200", "10001" ], "name": "Novi02:2", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", + "node": "urn:sdx:node:zaoxi:B2", "short_name": "B2:2", "status": "up" } @@ -953,30 +963,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:zaoxi:A1-B1", + "id": "urn:sdx:link:zaoxi:A1-B1", "name": "zaoxi:A1-B1", "short_name": "Karoo-Sangano", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:1", + "id": "urn:sdx:port:zaoxi:A1:1", "label_range": [ "100-200", "1000" ], "name": "Novi100:1", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", + "node": "urn:sdx:node:zaoxi:A1", "short_name": "A1:1", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:3", + "id": "urn:sdx:port:zaoxi:B1:3", "name": "Novi01:3", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", + "node": "urn:sdx:node:zaoxi:B1", "short_name": "B1:3", "label_range": [ "100-200", @@ -993,34 +1003,34 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:zaoxi:A1-B2", + "id": "urn:sdx:link:zaoxi:A1-B2", "name": "zaoxi:A1-B2", "short_name": "Karoo-Capetown", "nni": null, "ports": [ { - "id": "urn:ogf:network:sdx:port:zaoxi:A1:2", + "id": "urn:sdx:port:zaoxi:A1:2", "label_range": [ "100-200", "1000" ], "name": "Novi100:2", - "node": "urn:ogf:network:sdx:node:zaoxi:A1", + "node": "urn:sdx:node:zaoxi:A1", "short_name": "A1:2", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B2:3", + "id": "urn:sdx:port:zaoxi:B2:3", "label_range": [ "100-200", "10001" ], "name": 
"Novi02:3", - "node": "urn:ogf:network:sdx:node:zaoxi:B2", + "node": "urn:sdx:node:zaoxi:B2", "short_name": "B2:3", "status": "up" } @@ -1033,30 +1043,30 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null }, { - "id": "urn:ogf:network:sdx:link:nni:Fortaleza-Sangano", + "id": "urn:sdx:link:nni:Fortaleza-Sangano", "name": "nni:Fortaleza-Sangano", "short_name": "Fortaleza-Sangano", "nni": true, "ports": [ { - "id": "urn:ogf:network:sdx:port:sax:B3:1", + "id": "urn:sdx:port:sax:B3:1", "label_range": [ "100-200", "1000" ], "name": "Novi02:3", - "node": "urn:ogf:network:sdx:node:sax:B3", + "node": "urn:sdx:node:sax:B3", "short_name": "B3:1", "status": "up" }, { - "id": "urn:ogf:network:sdx:port:zaoxi:B1:1", + "id": "urn:sdx:port:zaoxi:B1:1", "name": "Novi01:1", - "node": "urn:ogf:network:sdx:node:zaoxi:B1", + "node": "urn:sdx:node:zaoxi:B1", "short_name": "B1:1", "label_range": [ "100-200", @@ -1073,7 +1083,7 @@ "status": null, "state": null, "private_attributes": null, - "time_stamp": null, + "timestamp": null, "measurement_period": null } ], diff --git a/tests/data/test_request_amlight_user_port.json b/tests/data/test_request_amlight_user_port.json new file mode 100644 index 00000000..d9bda895 --- /dev/null +++ b/tests/data/test_request_amlight_user_port.json @@ -0,0 +1,20 @@ +{ + "id": "id", + "name": "AMLight", + "start_time": "2000-01-23T04:56:07.000Z", + "end_time": "2000-01-23T04:56:07.000Z", + "bandwidth_required": 100, + "latency_required": 20, + "egress_port": + { + "id": "urn:sdx:port:amlight.net:A1:3", + "name": "Novi100:1", + "vlan_range": 150 + }, + "ingress_port": + { + "id": "urn:sdx:port:amlight.net:B1:2", + "name": "Novi100:2", + "vlan_range": 160 + } +} diff --git a/tests/data/test_request_amlight_zaoxi_user_port.json b/tests/data/test_request_amlight_zaoxi_user_port.json new file mode 100644 index 00000000..cdb7819c --- /dev/null +++ b/tests/data/test_request_amlight_zaoxi_user_port.json @@ -0,0 +1,22 @@ +{ + "id": "285eea4b-1e86-4d54-bd75-f14b8cb4a63a", + "name": "Test connection request", + "start_time": "2000-01-23T04:56:07.000Z", + "end_time": "2000-01-23T04:56:07.000Z", + "bandwidth_required": 10, + "latency_required": 300, + "egress_port": { + "id": "urn:sdx:port:amlight.net:A1:3", + "name": "Novi100:1", + "node": "urn:sdx:node:amlight.net:A1", + "status": "up", + "vlan_range": 150 + }, + "ingress_port": { + "id": "urn:sdx:port:zaoxi:B2:1", + "name": "Novi100:2", + "node": "urn:sdx:node:zaoxi:B2", + "status": "up", + "vlan_range": 160 + } +} diff --git a/tests/test_path_function.py b/tests/test_path_function.py new file mode 100644 index 00000000..a8e5578e --- /dev/null +++ b/tests/test_path_function.py @@ -0,0 +1,60 @@ +import unittest + +from sdx_pce.utils.functions import backup_path, dijnew + + +class TestDijnew(unittest.TestCase): + def test_shortest_path(self): + graph = { + "A": {"B": 2, "C": 4}, + "B": {"A": 2, "C": 1, "D": 4}, + "C": {"A": 4, "B": 1, "D": 3}, + "D": {"B": 4, "C": 3}, + } + start_node = "A" + end_node = "D" + expected_path = ["A", "B", "D"] + + result = dijnew(graph, start_node, end_node) + print(result) + + self.assertEqual(result, expected_path) + + def test_unreachable_path(self): + graph = { + "A": {"B": 2, "C": 4}, + "B": {"A": 2, "C": 1}, + "C": {"A": 4, "B": 1}, + "D": {"E": 4}, + "E": {"D": 4}, + } + start_node = "A" + end_node = "D" + expected_path = [] + + result = dijnew(graph, start_node, end_node) + + print(result) + + self.assertEqual(result, 
expected_path) + + def test_backup_path(self): + graph = { + "A": {"B": 2, "C": 4}, + "B": {"A": 2, "C": 1, "D": 4}, + "C": {"A": 4, "B": 1, "D": 3}, + "D": {"B": 4, "C": 3}, + } + start_node = "A" + end_node = "D" + expected_path = ["A", "C", "D"] + + result = backup_path(graph, start_node, end_node) + + print(result) + + self.assertEqual(result, expected_path) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_random_connection_generator.py b/tests/test_random_connection_generator.py new file mode 100644 index 00000000..d5e66614 --- /dev/null +++ b/tests/test_random_connection_generator.py @@ -0,0 +1,42 @@ +import unittest + +from sdx_pce.utils.random_connection_generator import RandomConnectionGenerator + + +class RandomConnectionGeneratorTest(unittest.TestCase): + def setUp(self): + self.generator = RandomConnectionGenerator(20) + + def test_generate(self): + querynum = 3 + l_bw = 100 + u_bw = 1000 + l_lat = 1000 + u_lat = 1500 + seed = 2022 + + traffic_matrix = self.generator.generate( + querynum, l_bw, u_bw, l_lat, u_lat, seed + ) + + # Assert that the generated traffic matrix has the correct number of requests + self.assertEqual(len(traffic_matrix.connection_requests), querynum) + + # Assert that each request has the correct bandwidth and latency values + for request in traffic_matrix.connection_requests: + self.assertGreaterEqual(request.required_bandwidth, l_bw) + self.assertLessEqual(request.required_bandwidth, u_bw) + self.assertGreaterEqual(request.required_latency, l_lat) + self.assertLessEqual(request.required_latency, u_lat) + + def test_lognormal(self): + # Add test cases for the lognormal method if needed + pass + + def test_random(self): + # Add test cases for the random method if needed + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_random_topology_generator.py b/tests/test_random_topology_generator.py new file mode 100644 index 00000000..7401ce86 --- /dev/null +++ b/tests/test_random_topology_generator.py @@ -0,0 +1,38 @@ +import unittest + +import networkx as nx + +from sdx_pce.utils.random_topology_generator import RandomTopologyGenerator + + +class RandomTopologyGeneratorTest(unittest.TestCase): + def setUp(self): + self.generator = RandomTopologyGenerator(num_node=5) + + def test_generate_graph(self): + graph = self.generator.generate_graph(plot=False) + self.assertIsInstance(graph, nx.Graph) + self.assertEqual(len(graph.nodes), 5) + + def test_link_property_assign(self): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (2, 3)]) + self.generator.set_graph(graph) + self.generator.link_property_assign() + self.assertIsNotNone(self.generator.get_latency_list()) + + def test_nodes_connected(self): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (2, 3)]) + self.assertTrue(self.generator.nodes_connected(graph, 1, 2)) + self.assertFalse(self.generator.nodes_connected(graph, 1, 3)) + + def test_get_connectivity(self): + self.generator.generate_graph(plot=False) + self.assertGreaterEqual(self.generator.get_connectivity(), 2) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_te_manager.py b/tests/test_te_manager.py index c7924160..075d9494 100644 --- a/tests/test_te_manager.py +++ b/tests/test_te_manager.py @@ -1,18 +1,12 @@ import json -import pathlib import pprint import unittest import networkx as nx -from sdx.pce.load_balancing.te_solver import TESolver -from sdx.pce.models import ( - ConnectionPath, - 
ConnectionRequest, - ConnectionSolution, - TrafficMatrix, -) -from sdx.pce.topology.temanager import TEManager +from sdx_pce.load_balancing.te_solver import TESolver +from sdx_pce.models import ConnectionRequest, ConnectionSolution, TrafficMatrix +from sdx_pce.topology.temanager import TEManager from . import TestData @@ -24,36 +18,53 @@ class TEManagerTests(unittest.TestCase): def setUp(self): topology = json.loads(TestData.TOPOLOGY_FILE_AMLIGHT.read_text()) - request = json.loads(TestData.CONNECTION_REQ_AMLIGHT.read_text()) - self.temanager = TEManager(topology, request) + self.temanager = TEManager(topology) def tearDown(self): self.temanager = None + def test_expand_label_range(self): + """ + Test the _expand_label_range() method. + """ + # Test case 1: Single label range + label_range = [[100, 105], 110] + expanded_range = self.temanager._expand_label_range(label_range) + expected_range = [100, 101, 102, 103, 104, 105, 110] + self.assertEqual(expanded_range, expected_range) + + # Test case 2: Multiple label ranges + label_ranges = [[200, 205], 309, "310-312"] + expanded_ranges = self.temanager._expand_label_range(label_ranges) + expected_ranges = [ + 200, + 201, + 202, + 203, + 204, + 205, + 309, + 310, + 311, + 312, + ] + self.assertEqual(expanded_ranges, expected_ranges) + + # Test case 3: Empty label range + label_range = [] + expanded_range = self.temanager._expand_label_range(label_range) + expected_range = [] + self.assertEqual(expanded_range, expected_range) + def test_generate_solver_input(self): print("Test Convert Connection To Topology") - connection = self._make_connection() + request = json.loads(TestData.CONNECTION_REQ_AMLIGHT.read_text()) + connection = self._make_traffic_matrix_from_request(request) self.assertIsNotNone(connection) def test_connection_breakdown_none_input(self): - # Expect an error to be raised. - self.assertRaises( - AssertionError, self.temanager.generate_connection_breakdown, None - ) - - def test_connection_breakdown_simple(self): - # Test that the old way, which had plain old dicts and arrays - # representing connection requests, still works. - request = [ - { - "1": [[0, 1], [1, 2]], - }, - 1.0, - ] - - breakdown = self.temanager.generate_connection_breakdown(request) - print(f"Breakdown: {breakdown}") - self.assertIsNotNone(breakdown) + # Expect no breakdown when input is None. + self.assertIsNone(self.temanager.generate_connection_breakdown(None, None)) def test_connection_breakdown_tm(self): # Breaking down a traffic matrix. @@ -70,7 +81,7 @@ def test_connection_breakdown_tm(self): self.assertIsNotNone(solution.connection_map) self.assertNotEqual(solution.cost, 0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown(solution, request) print(f"Breakdown: {breakdown}") self.assertIsNotNone(breakdown) self.assertIsInstance(breakdown, dict) @@ -79,7 +90,7 @@ def test_connection_breakdown_tm(self): # Make sure that breakdown contains domains as keys, and dicts # as values. The domain name is a little goofy, because the # topology we have is goofy. 
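
Note (illustrative, not part of this patch): the new `test_expand_label_range()` earlier in this file pins down the contract of `TEManager._expand_label_range()` — `[lo, hi]` pairs, bare integers, and `"lo-hi"` strings all flatten into one ordered list of labels. The sketch below re-implements that contract purely to make it explicit for readers; `expand_label_range` here is a stand-alone illustration, not the private method under test.

```python
from typing import Iterable, List, Union

LabelSpec = Union[int, str, List[int]]


def expand_label_range(label_range: Iterable[LabelSpec]) -> List[int]:
    """Flatten label specs into an ordered list of integer labels."""
    expanded: List[int] = []
    for entry in label_range:
        if isinstance(entry, int):
            # A bare label is kept as-is.
            expanded.append(entry)
        elif isinstance(entry, str):
            # A "lo-hi" string expands to the inclusive range.
            lo, hi = (int(part) for part in entry.split("-"))
            expanded.extend(range(lo, hi + 1))
        else:
            # Assume a [lo, hi] pair, as in the test data.
            lo, hi = entry
            expanded.extend(range(lo, hi + 1))
    return expanded


# The three cases exercised by the new test:
assert expand_label_range([[100, 105], 110]) == [100, 101, 102, 103, 104, 105, 110]
assert expand_label_range([[200, 205], 309, "310-312"]) == list(range(200, 206)) + [309, 310, 311, 312]
assert expand_label_range([]) == []
```
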
- link = breakdown.get("urn:ogf:network:sdx:topology:amlight.net") + link = breakdown.get("urn:sdx:topology:amlight.net") self.assertIsInstance(link, dict) def test_connection_breakdown_two_similar_requests(self): @@ -96,14 +107,14 @@ def test_connection_breakdown_two_similar_requests(self): self.assertIsNotNone(solution.connection_map) self.assertNotEqual(solution.cost, 0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown(solution, request) print(f"Breakdown: {breakdown}") self.assertIsNotNone(breakdown) self.assertIsInstance(breakdown, dict) self.assertEqual(len(breakdown), 1) - link = breakdown.get("urn:ogf:network:sdx:topology:amlight.net") + link = breakdown.get("urn:sdx:topology:amlight.net") self.assertIsInstance(link, dict) def test_connection_breakdown_three_domains(self): @@ -116,6 +127,12 @@ def test_connection_breakdown_three_domains(self): zaoxi_topology = json.loads(TestData.TOPOLOGY_FILE_ZAOXI.read_text()) self.temanager.add_topology(zaoxi_topology) + topology_map = self.temanager.get_topology_map() + self.assertIsInstance(topology_map, dict) + + for num, val in enumerate(topology_map.values()): + print(f"TE topology #{num}: {val}") + request = [ { "1": [[1, 2], [3, 4]], @@ -129,16 +146,16 @@ def test_connection_breakdown_three_domains(self): self.assertIsNotNone(solution.connection_map) self.assertNotEqual(solution.cost, 0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown(solution, request) print(f"Breakdown: {breakdown}") self.assertIsNotNone(breakdown) self.assertIsInstance(breakdown, dict) self.assertEqual(len(breakdown), 3) - amlight = breakdown.get("urn:ogf:network:sdx:topology:amlight.net") - zaoxi = breakdown.get("urn:ogf:network:sdx:topology:zaoxi.net") - sax = breakdown.get("urn:ogf:network:sdx:topology:sax.net") + amlight = breakdown.get("urn:sdx:topology:amlight.net") + zaoxi = breakdown.get("urn:sdx:topology:zaoxi.net") + sax = breakdown.get("urn:sdx:topology:sax.net") for segment in [zaoxi, sax, amlight]: self.assertIsInstance(segment, dict) @@ -176,19 +193,19 @@ def test_connection_breakdown_three_domains_sax_connection(self): solution = self._make_tm_and_solve(request) - print(f"topology: {self.temanager.topology_manager.topology}") - print(f"topology_list: {self.temanager.topology_manager.topology_list}") + print(f"topology: {self.temanager.topology_manager.get_topology()}") + # print(f"topology_list: {self.temanager.topology_manager._topology_map}") self.assertIsNotNone(solution.connection_map) self.assertNotEqual(solution.cost, 0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown(solution, request) print(f"Breakdown: {breakdown}") - sax = breakdown.get("urn:ogf:network:sdx:topology:sax.net") + sax = breakdown.get("urn:sdx:topology:sax.net") print(f"Breakdown, SAX: {sax}") - zaoxi = breakdown.get("urn:ogf:network:sdx:topology:zaoxi.net") + zaoxi = breakdown.get("urn:sdx:topology:zaoxi.net") print(f"Breakdown, ZAOXI: {zaoxi}") def test_connection_breakdown_some_input(self): @@ -209,7 +226,7 @@ def test_connection_breakdown_some_input(self): self.assertEqual(solution.cost, 0) # If there's no solution, there should be no breakdown either. 
- breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown(solution, request) self.assertIsNone(breakdown) def test_generate_graph_and_connection_with_sax_2_invalid(self): @@ -220,9 +237,8 @@ def test_generate_graph_and_connection_with_sax_2_invalid(self): TODO: Use a better name for this method. """ topology = json.loads(TestData.TOPOLOGY_FILE_SAX_2.read_text()) - request = json.loads(TestData.CONNECTION_REQ_FILE_SAX_2_INVALID.read_text()) + temanager = TEManager(topology) - temanager = TEManager(topology, request) self.assertIsNotNone(temanager) graph = temanager.generate_graph_te() @@ -232,8 +248,9 @@ def test_generate_graph_and_connection_with_sax_2_invalid(self): # Expect None because the connection_data contains # unresolvable port IDs, which are not present in the given # topology. - connection = temanager.generate_connection_te() - self.assertIsNone(connection) + request = json.loads(TestData.CONNECTION_REQ_FILE_SAX_2_INVALID.read_text()) + tm = temanager.generate_traffic_matrix(request) + self.assertIsNone(tm) def test_generate_graph_and_connection_with_sax_2_valid(self): """ @@ -243,18 +260,17 @@ def test_generate_graph_and_connection_with_sax_2_valid(self): TODO: Use a better name for this method. """ topology = json.loads(TestData.TOPOLOGY_FILE_SAX_2.read_text()) - request = json.loads(TestData.CONNECTION_REQ_FILE_SAX_2_VALID.read_text()) + temanager = TEManager(topology) - temanager = TEManager(topology, request) self.assertIsNotNone(temanager) graph = temanager.generate_graph_te() - print(f"graph: {graph}") self.assertIsNotNone(graph) self.assertIsInstance(graph, nx.Graph) - tm = temanager.generate_connection_te() + request = json.loads(TestData.CONNECTION_REQ_FILE_SAX_2_VALID.read_text()) + tm = temanager.generate_traffic_matrix(request) print(f"traffic matrix: {tm}") self.assertIsInstance(tm, TrafficMatrix) @@ -264,28 +280,181 @@ def test_generate_graph_and_connection_with_sax_2_valid(self): self.assertEqual(request.source, 1) self.assertEqual(request.destination, 0) self.assertEqual(request.required_bandwidth, 0) - self.assertEqual(request.required_latency, 0) + self.assertEqual(request.required_latency, float("inf")) solver = TESolver(graph, tm) self.assertIsNotNone(solver) - # Solver will fail to find a solution here. + # Solver will find a solution here. solution = solver.solve() print(f"Solution to tm {tm}: {solution}") - self.assertIsNone(solution.connection_map, None) - self.assertEqual(solution.cost, 0.0) + self.assertIsNotNone(solution.connection_map, None) + self.assertEqual(solution.cost, 1.0) + + def test_connection_amlight(self): + """ + Test with just one topology/domain. + """ + temanager = TEManager(topology_data=None) + + topology = json.loads(TestData.TOPOLOGY_FILE_AMLIGHT.read_text()) + temanager.add_topology(topology) + graph = temanager.generate_graph_te() + + self.assertIsInstance(graph, nx.Graph) + + request = json.loads(TestData.CONNECTION_REQ_AMLIGHT.read_text()) + print(f"connection request: {request}") + + traffic_matrix = temanager.generate_traffic_matrix(request) + self.assertIsInstance(traffic_matrix, TrafficMatrix) + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + self.assertIsInstance(solution, ConnectionSolution) + + def test_connection_amlight_user_port(self): + """ + Test with just one topology/domain. 
+ """ + temanager = TEManager(topology_data=None) + + topology = json.loads(TestData.TOPOLOGY_FILE_AMLIGHT_USER_PORT.read_text()) + temanager.add_topology(topology) + graph = temanager.generate_graph_te() + + self.assertIsInstance(graph, nx.Graph) + + connection_request = json.loads( + TestData.CONNECTION_REQ_AMLIGHT_USER_PORT.read_text() + ) + print(f"connection request: {connection_request}") + + traffic_matrix = temanager.generate_traffic_matrix(connection_request) + self.assertIsInstance(traffic_matrix, TrafficMatrix) + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + self.assertIsInstance(solution, ConnectionSolution) + + links = temanager.get_links_on_path(solution) + print(f"Links on path: {links}") + + # Make a flat list of links in connection solution dict, and + # check that we have the same number of links. + values = sum([v for v in solution.connection_map.values()], []) + self.assertEqual(len(links), len(values)) + + breakdown = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown: {json.dumps(breakdown)}") def test_connection_amlight_to_zaoxi(self): """ Exercise a connection request between Amlight and Zaoxi. """ + temanager = TEManager(topology_data=None) + + for path in ( + TestData.TOPOLOGY_FILE_AMLIGHT, + TestData.TOPOLOGY_FILE_SAX, + TestData.TOPOLOGY_FILE_ZAOXI, + ): + topology = json.loads(path.read_text()) + temanager.add_topology(topology) + + graph = temanager.generate_graph_te() + connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) print(f"connection_request: {connection_request}") + traffic_matrix = temanager.generate_traffic_matrix(connection_request) + + print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") + + self.assertIsNotNone(graph) + self.assertIsNotNone(traffic_matrix) + + conn = temanager.requests_connectivity(traffic_matrix) + print(f"Graph connectivity: {conn}") + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + + self.assertIsNotNone(solution.connection_map) + + links = temanager.get_links_on_path(solution) + print(f"Links on path: {links}") + + # Make a flat list of links in connection solution dict, and + # check that we have the same number of links. + values = sum([v for v in solution.connection_map.values()], []) + self.assertEqual(len(links), len(values)) + + breakdown = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown: {json.dumps(breakdown)}") + + connection_request = temanager.add_breakdowns_to_connection( + connection_request, breakdown + ) + + temanager._logger.info( + f"connection_request with breakdowns: {connection_request}" + ) + + # Note that the "domain" key is correct in the breakdown + # result when we initialize TEManager with None for topology, + # and later add individual topologies with add_topology(). 
+ zaoxi = breakdown.get("urn:sdx:topology:zaoxi.net") + sax = breakdown.get("urn:sdx:topology:sax.net") + amlight = breakdown.get("urn:sdx:topology:amlight.net") + + # Per https://github.com/atlanticwave-sdx/pce/issues/101, each + # breakdown should be of the below form: + # + # { + # "name": "TENET_vlan_201_203_Ampath_Tenet", + # "dynamic_backup_path": true, + # "uni_a": { + # "tag": { + # "value": 203, + # "tag_type": 1 + # }, + # "interface_id": "cc:00:00:00:00:00:00:07:41" + # }, + # "uni_z": { + # "tag": { + # "value": 201, + # "tag_type": 1 + # }, + # "interface_id": "cc:00:00:00:00:00:00:08:50" + # } + # } + for segment in [zaoxi, sax, amlight]: + self.assertIsInstance(segment, dict) + self.assertIsInstance(segment.get("name"), str) + self.assertIsInstance(segment.get("dynamic_backup_path"), bool) + self.assertIsInstance(segment.get("uni_a"), dict) + self.assertIsInstance(segment.get("uni_a").get("tag"), dict) + self.assertIsInstance(segment.get("uni_a").get("tag").get("value"), int) + self.assertIsInstance(segment.get("uni_a").get("tag").get("tag_type"), int) + self.assertIsInstance(segment.get("uni_a").get("port_id"), str) + self.assertIsInstance(segment.get("uni_z"), dict) + self.assertIsInstance(segment.get("uni_z").get("tag"), dict) + self.assertIsInstance(segment.get("uni_z").get("tag").get("value"), int) + self.assertIsInstance(segment.get("uni_z").get("tag").get("tag_type"), int) + self.assertIsInstance(segment.get("uni_z").get("port_id"), str) - temanager = TEManager(topology_data=None, connection_data=connection_request) + def test_connection_amlight_to_zaoxi_user_port(self): + """ + Exercise a connection request between Amlight and Zaoxi. + """ + temanager = TEManager(topology_data=None) for path in ( - TestData.TOPOLOGY_FILE_AMLIGHT, + TestData.TOPOLOGY_FILE_AMLIGHT_USER_PORT, TestData.TOPOLOGY_FILE_SAX, TestData.TOPOLOGY_FILE_ZAOXI, ): @@ -293,7 +462,12 @@ def test_connection_amlight_to_zaoxi(self): temanager.add_topology(topology) graph = temanager.generate_graph_te() - traffic_matrix = temanager.generate_connection_te() + + connection_request = json.loads( + TestData.CONNECTION_REQ_AMLIGHT_ZAOXI_USER_PORT.read_text() + ) + print(f"connection_request: {connection_request}") + traffic_matrix = temanager.generate_traffic_matrix(connection_request) print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") @@ -308,15 +482,25 @@ def test_connection_amlight_to_zaoxi(self): self.assertIsNotNone(solution.connection_map) - breakdown = temanager.generate_connection_breakdown(solution) + links = temanager.get_links_on_path(solution) + print(f"Links on path: {links}") + + # Make a flat list of links in connection solution dict, and + # check that we have the same number of links. + values = sum([v for v in solution.connection_map.values()], []) + self.assertEqual(len(links), len(values)) + + breakdown = temanager.generate_connection_breakdown( + solution, connection_request + ) print(f"breakdown: {json.dumps(breakdown)}") # Note that the "domain" key is correct in the breakdown # result when we initialize TEManager with None for topology, # and later add individual topologies with add_topology(). 
- zaoxi = breakdown.get("urn:ogf:network:sdx:topology:zaoxi.net") - sax = breakdown.get("urn:ogf:network:sdx:topology:sax.net") - amlight = breakdown.get("urn:ogf:network:sdx:topology:amlight.net") + zaoxi = breakdown.get("urn:sdx:topology:zaoxi.net") + sax = breakdown.get("urn:sdx:topology:sax.net") + amlight = breakdown.get("urn:sdx:topology:amlight.net") # Per https://github.com/atlanticwave-sdx/pce/issues/101, each # breakdown should be of the below form: @@ -354,6 +538,252 @@ def test_connection_amlight_to_zaoxi(self): self.assertIsInstance(segment.get("uni_z").get("tag").get("tag_type"), int) self.assertIsInstance(segment.get("uni_z").get("port_id"), str) + def test_connection_amlight_to_zaoxi_two_identical_requests(self): + """ + Exercise two identical connection requests. + """ + temanager = TEManager(topology_data=None) + + for path in ( + TestData.TOPOLOGY_FILE_AMLIGHT, + TestData.TOPOLOGY_FILE_SAX, + TestData.TOPOLOGY_FILE_ZAOXI, + ): + topology = json.loads(path.read_text()) + temanager.add_topology(topology) + + graph = temanager.generate_graph_te() + + connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) + print(f"connection_request: {connection_request}") + traffic_matrix = temanager.generate_traffic_matrix(connection_request) + + print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") + + self.assertIsNotNone(graph) + self.assertIsNotNone(traffic_matrix) + + conn = temanager.requests_connectivity(traffic_matrix) + print(f"Graph connectivity: {conn}") + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + + self.assertIsNotNone(solution.connection_map) + + breakdown = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown: {json.dumps(breakdown)}") + + zaoxi = breakdown.get("urn:sdx:topology:zaoxi.net") + sax = breakdown.get("urn:sdx:topology:sax.net") + amlight = breakdown.get("urn:sdx:topology:amlight.net") + + # Find solution for another identical connection request, and + # compare solutions. They should be different. + traffic_matrix2 = temanager.generate_traffic_matrix(connection_request) + + solution = TESolver(graph, traffic_matrix2).solve() + print(f"TESolver result: {solution}") + + self.assertIsNotNone(solution.connection_map) + + breakdown2 = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown2: {json.dumps(breakdown2)}") + + self.assertNotEqual(breakdown, breakdown2) + + zaoxi2 = breakdown2.get("urn:sdx:topology:zaoxi.net") + sax2 = breakdown2.get("urn:sdx:topology:sax.net") + amlight2 = breakdown2.get("urn:sdx:topology:amlight.net") + + self.assertNotEqual(zaoxi, zaoxi2) + self.assertNotEqual(sax, sax2) + self.assertNotEqual(amlight, amlight2) + + print(f"zaoxi: {zaoxi}, zaoxi2: {zaoxi2}") + print(f"sax: {sax}, sax2: {sax2}") + print(f"amlight: {amlight}, amlight2: {amlight2}") + + def test_connection_amlight_to_zaoxi_many_identical_requests(self): + """ + Exercise many identical connection requests. + """ + temanager = TEManager(topology_data=None) + + for path in ( + TestData.TOPOLOGY_FILE_AMLIGHT, + TestData.TOPOLOGY_FILE_SAX, + TestData.TOPOLOGY_FILE_ZAOXI, + ): + topology = json.loads(path.read_text()) + temanager.add_topology(topology) + + graph = temanager.generate_graph_te() + + connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) + + breakdowns = set() + num_requests = 10 + + for i in range(0, num_requests): + # Give each connection request a unique ID. 
+ connection_request["id"] = f"{self.id()}-#{i}" + print(f"connection_request: {connection_request}") + + traffic_matrix = temanager.generate_traffic_matrix(connection_request) + + print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") + + self.assertIsNotNone(graph) + self.assertIsNotNone(traffic_matrix) + + conn = temanager.requests_connectivity(traffic_matrix) + print(f"Graph connectivity: {conn}") + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + + self.assertIsNotNone(solution.connection_map) + + breakdown = json.dumps( + temanager.generate_connection_breakdown(solution, connection_request) + ) + + print(f"breakdown: {breakdown}") + self.assertIsNotNone(breakdown) + + breakdowns.add(breakdown) + + print(f"breakdowns: {breakdowns}") + + # Check that we have the same number of unique breakdowns as + # connection requests. + self.assertEqual(len(breakdowns), num_requests) + + def test_connection_amlight_to_zaoxi_two_distinct_requests(self): + """ + Test with two distinct connection requests. + """ + temanager = TEManager(topology_data=None) + + for path in ( + TestData.TOPOLOGY_FILE_AMLIGHT, + TestData.TOPOLOGY_FILE_SAX, + TestData.TOPOLOGY_FILE_ZAOXI, + ): + topology = json.loads(path.read_text()) + temanager.add_topology(topology) + + graph = temanager.generate_graph_te() + print(f"Generated graph: '{graph}'") + + self.assertIsInstance(graph, nx.Graph) + + # Use a connection request that should span all three domains. + connection_request1 = json.loads(TestData.CONNECTION_REQ.read_text()) + print(f"Connection request #1: {connection_request1}") + traffic_matrix1 = temanager.generate_traffic_matrix(connection_request1) + + print(f"Traffic matrix #1: '{traffic_matrix1}'") + self.assertIsInstance(traffic_matrix1, TrafficMatrix) + + solution1 = TESolver(graph, traffic_matrix1).solve() + print(f"TESolver result #1: {solution1}") + + self.assertIsInstance(solution1, ConnectionSolution) + self.assertIsNotNone(solution1.connection_map) + + breakdown1 = temanager.generate_connection_breakdown( + solution1, connection_request1 + ) + print(f"Breakdown #1: {json.dumps(breakdown1)}") + + # Use another connection request that spans just one domain. + connection_request2 = json.loads(TestData.CONNECTION_REQ_AMLIGHT.read_text()) + print(f"Connection request #2: {connection_request2}") + + traffic_matrix2 = temanager.generate_traffic_matrix(connection_request2) + print(f"Traffic matrix #2: '{traffic_matrix2}'") + self.assertIsInstance(traffic_matrix2, TrafficMatrix) + + solution2 = TESolver(graph, traffic_matrix2).solve() + print(f"TESolver result #2: {solution2}") + + self.assertIsInstance(solution2, ConnectionSolution) + self.assertIsNotNone(solution2.connection_map) + + breakdown2 = temanager.generate_connection_breakdown( + solution2, connection_request2 + ) + print(f"Breakdown #2: {json.dumps(breakdown2)}") + + self.assertNotEqual(connection_request1, connection_request2) + self.assertNotEqual(traffic_matrix1, traffic_matrix2) + self.assertNotEqual(solution1, solution2) + self.assertNotEqual(breakdown1, breakdown2) + + def test_connection_amlight_to_zaoxi_unreserve(self): + """ + Exercise a connection request between Amlight and Zaoxi. 
+ """ + temanager = TEManager(topology_data=None) + + for path in ( + TestData.TOPOLOGY_FILE_AMLIGHT, + TestData.TOPOLOGY_FILE_SAX, + TestData.TOPOLOGY_FILE_ZAOXI, + ): + topology = json.loads(path.read_text()) + temanager.add_topology(topology) + + graph = temanager.generate_graph_te() + + connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) + print(f"connection_request: {connection_request}") + traffic_matrix = temanager.generate_traffic_matrix(connection_request) + + print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") + + self.assertIsNotNone(graph) + self.assertIsNotNone(traffic_matrix) + + conn = temanager.requests_connectivity(traffic_matrix) + print(f"Graph connectivity: {conn}") + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + + self.assertIsNotNone(solution.connection_map) + + breakdown1 = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown1: {json.dumps(breakdown1)}") + + # Return all used VLANs. + temanager.unreserve_vlan(request_id=connection_request.get("id")) + + # Can we get the same breakdown for the same request now? + breakdown2 = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown2: {json.dumps(breakdown2)}") + + self.assertEqual(breakdown1, breakdown2) + + # If we generate another breakdown without un-reserving any + # VLANs, the result should be distinct from the previous ones. + breakdown3 = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown3: {json.dumps(breakdown3)}") + self.assertNotEqual(breakdown1, breakdown3) + self.assertNotEqual(breakdown2, breakdown3) + def test_connection_amlight_to_zaoxi_with_merged_topology(self): """ Solve with the "merged" topology of amlight, sax, and zaoxi. @@ -362,19 +792,16 @@ def test_connection_amlight_to_zaoxi_with_merged_topology(self): have a merged topology, nodes do not resolve to correct domains. """ - - connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) - print(f"connection_request: {connection_request}") - topology_data = json.loads(TestData.TOPOLOGY_FILE_SDX.read_text()) print(f"topology_data: {topology_data}") - temanager = TEManager( - topology_data=topology_data, connection_data=connection_request - ) + temanager = TEManager(topology_data=topology_data) graph = temanager.generate_graph_te() - traffic_matrix = temanager.generate_connection_te() + + connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) + print(f"connection_request: {connection_request}") + traffic_matrix = temanager.generate_traffic_matrix(connection_request) print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") @@ -390,35 +817,43 @@ def test_connection_amlight_to_zaoxi_with_merged_topology(self): # This hopefully should find a solution. self.assertIsNotNone(solution.connection_map) - breakdown = temanager.generate_connection_breakdown(solution) + breakdown = temanager.generate_connection_breakdown( + solution, connection_request + ) print(f"breakdown: {json.dumps(breakdown)}") # Note that the "domain" key is wrong in the results when we # initialize TEManager with a merged topology. 
- self.assertIsNotNone(breakdown.get("urn:ogf:network:sdx")) + self.assertIsNotNone(breakdown.get("urn:sdx")) def test_generate_graph_and_connection(self): graph = self.temanager.generate_graph_te() - tm = self.temanager.generate_connection_te() print(f"graph: {graph}") - print(f"tm: {tm}") - self.assertIsNotNone(graph) self.assertIsInstance(graph, nx.Graph) + request = json.loads(TestData.CONNECTION_REQ_AMLIGHT.read_text()) + tm = self.temanager.generate_traffic_matrix(request) + + print(f"tm: {tm}") self.assertIsNotNone(tm) self.assertIsInstance(tm, TrafficMatrix) - def _make_connection(self): + def _make_traffic_matrix_from_request( + self, connection_request: dict + ) -> TrafficMatrix: + """ + Make a traffic matrix out of a connection request dict. + """ graph = self.temanager.graph print(f"Generated networkx graph of the topology: {graph}") print(f"Graph nodes: {graph.nodes[0]}, edges: {graph.edges}") - connection = self.temanager.generate_connection_te() - print(f"connection: {connection}") + traffic_matrix = self.temanager.generate_traffic_matrix(connection_request) + print(f"traffic_matrix: {traffic_matrix}") - return connection + return traffic_matrix def _make_tm_and_solve(self, request) -> ConnectionSolution: """ @@ -427,7 +862,7 @@ def _make_tm_and_solve(self, request) -> ConnectionSolution: """ # Make a connection request. - tm = self._make_traffic_matrix(request) + tm = self._make_traffic_matrix_from_list(request) print(f"tm: {tm}") graph = self.temanager.generate_graph_te() @@ -443,7 +878,7 @@ def _make_tm_and_solve(self, request) -> ConnectionSolution: return solution - def _make_traffic_matrix(self, old_style_request: list) -> TrafficMatrix: + def _make_traffic_matrix_from_list(self, old_style_request: list) -> TrafficMatrix: """ Make a traffic matrix from the old-style list. @@ -507,4 +942,95 @@ def _make_traffic_matrix(self, old_style_request: list) -> TrafficMatrix: ) ) - return TrafficMatrix(connection_requests=new_requests) + return TrafficMatrix(connection_requests=new_requests, request_id=self.id()) + + def test_connection_amlight_to_zaoxi_user_port_v2(self): + """ + Exercise a connection request between Amlight and Zaoxi. + """ + temanager = TEManager(topology_data=None) + + for path in ( + TestData.TOPOLOGY_FILE_AMLIGHT_USER_PORT, + TestData.TOPOLOGY_FILE_SAX, + TestData.TOPOLOGY_FILE_ZAOXI, + ): + topology = json.loads(path.read_text()) + temanager.add_topology(topology) + + graph = temanager.generate_graph_te() + + connection_request = json.loads( + TestData.CONNECTION_REQ_AMLIGHT_ZAOXI_USER_PORT_v2.read_text() + ) + print(f"connection_request: {connection_request}") + traffic_matrix = temanager.generate_traffic_matrix(connection_request) + + print(f"Generated graph: '{graph}', traffic matrix: '{traffic_matrix}'") + + self.assertIsNotNone(graph) + self.assertIsNotNone(traffic_matrix) + + conn = temanager.requests_connectivity(traffic_matrix) + print(f"Graph connectivity: {conn}") + + solution = TESolver(graph, traffic_matrix).solve() + print(f"TESolver result: {solution}") + + self.assertIsNotNone(solution.connection_map) + + links = temanager.get_links_on_path(solution) + print(f"Links on path: {links}") + + # Make a flat list of links in connection solution dict, and + # check that we have the same number of links. 
+ values = sum([v for v in solution.connection_map.values()], []) + self.assertEqual(len(links), len(values)) + + breakdown = temanager.generate_connection_breakdown( + solution, connection_request + ) + print(f"breakdown: {json.dumps(breakdown)}") + + # Note that the "domain" key is correct in the breakdown + # result when we initialize TEManager with None for topology, + # and later add individual topologies with add_topology(). + zaoxi = breakdown.get("urn:sdx:topology:zaoxi.net") + sax = breakdown.get("urn:sdx:topology:sax.net") + amlight = breakdown.get("urn:sdx:topology:amlight.net") + + # Per https://github.com/atlanticwave-sdx/pce/issues/101, each + # breakdown should be of the below form: + # + # { + # "name": "TENET_vlan_201_203_Ampath_Tenet", + # "dynamic_backup_path": true, + # "uni_a": { + # "tag": { + # "value": 203, + # "tag_type": 1 + # }, + # "interface_id": "cc:00:00:00:00:00:00:07:41" + # }, + # "uni_z": { + # "tag": { + # "value": 201, + # "tag_type": 1 + # }, + # "interface_id": "cc:00:00:00:00:00:00:08:50" + # } + # } + for segment in [zaoxi, sax, amlight]: + self.assertIsInstance(segment, dict) + self.assertIsInstance(segment.get("name"), str) + self.assertIsInstance(segment.get("dynamic_backup_path"), bool) + self.assertIsInstance(segment.get("uni_a"), dict) + self.assertIsInstance(segment.get("uni_a").get("tag"), dict) + self.assertIsInstance(segment.get("uni_a").get("tag").get("value"), int) + self.assertIsInstance(segment.get("uni_a").get("tag").get("tag_type"), int) + self.assertIsInstance(segment.get("uni_a").get("port_id"), str) + self.assertIsInstance(segment.get("uni_z"), dict) + self.assertIsInstance(segment.get("uni_z").get("tag"), dict) + self.assertIsInstance(segment.get("uni_z").get("tag").get("value"), int) + self.assertIsInstance(segment.get("uni_z").get("tag").get("tag_type"), int) + self.assertIsInstance(segment.get("uni_z").get("port_id"), str) diff --git a/tests/test_te_solver.py b/tests/test_te_solver.py index a5ad077d..eae4691a 100644 --- a/tests/test_te_solver.py +++ b/tests/test_te_solver.py @@ -1,15 +1,14 @@ import json -import pathlib import unittest import networkx as nx -from sdx.pce.load_balancing.te_solver import TESolver -from sdx.pce.models import ConnectionPath, ConnectionSolution, TrafficMatrix -from sdx.pce.utils.constants import Constants -from sdx.pce.utils.graphviz import can_read_dot_file, read_dot_file -from sdx.pce.utils.random_connection_generator import RandomConnectionGenerator -from sdx.pce.utils.random_topology_generator import RandomTopologyGenerator +from sdx_pce.load_balancing.te_solver import TESolver +from sdx_pce.models import ConnectionPath, ConnectionSolution, TrafficMatrix +from sdx_pce.utils.constants import Constants +from sdx_pce.utils.graphviz import can_read_dot_file, read_dot_file +from sdx_pce.utils.random_connection_generator import RandomConnectionGenerator +from sdx_pce.utils.random_topology_generator import RandomTopologyGenerator from . 
import TestData @@ -112,8 +111,9 @@ def test_mc_solve_5(self): ), ) - with open(traffic_matrix_file) as fp: - tm = TrafficMatrix.from_dict(json.load(fp)) + tm_dict = json.loads(traffic_matrix_file.read_text()) + tm_dict["request_id"] = self.id() + tm = TrafficMatrix.from_dict(tm_dict) solution = TESolver(graph, tm, Constants.COST_FLAG_HOP).solve() print(f"Solution: {solution}") @@ -139,8 +139,9 @@ def test_mc_solve_geant2012(self): self.assertNotEqual(graph, None, "Could not read dot file") connection_file = TestData.TEST_DATA_DIR / "test_connection.json" - with open(connection_file) as fp: - tm = TrafficMatrix.from_dict(json.load(fp)) + tm_dict = json.loads(connection_file.read_text()) + tm_dict["request_id"] = self.id() + tm = TrafficMatrix.from_dict(tm_dict) self.assertNotEqual(tm, None, "Could not read connection file") diff --git a/tests/test_te_solver_static.py b/tests/test_te_solver_static.py index 9db00174..553e4562 100644 --- a/tests/test_te_solver_static.py +++ b/tests/test_te_solver_static.py @@ -2,12 +2,13 @@ Solver tests that use some static topology files. These tests used to be in sdx-controller. """ + import json import unittest -from sdx.pce.load_balancing.te_solver import TESolver -from sdx.pce.models import ConnectionSolution -from sdx.pce.topology.temanager import TEManager +from sdx_pce.load_balancing.te_solver import TESolver +from sdx_pce.models import ConnectionSolution +from sdx_pce.topology.temanager import TEManager from . import TestData @@ -29,83 +30,84 @@ class TESolverTests(unittest.TestCase): ] def setUp(self): - with open(TestData.TOPOLOGY_FILE_SDX, "r", encoding="utf-8") as t: - topology_data = json.load(t) - with open(TestData.CONNECTION_REQ, "r", encoding="utf-8") as c: - connection_data = json.load(c) + topology_data = json.loads(TestData.TOPOLOGY_FILE_SDX.read_text()) + self.temanager = TEManager(topology_data) - self.temanager = TEManager(topology_data, connection_data) + self.connection_request = json.loads(TestData.CONNECTION_REQ.read_text()) def test_computation_breakdown(self): graph = self.temanager.generate_graph_te() - connection_request = self.temanager.generate_connection_te() + tm = self.temanager.generate_traffic_matrix(self.connection_request) print(f"Number of nodes: {graph.number_of_nodes()}") print(f"Graph edges: {graph.edges}") - print(f"Traffic Matrix: {connection_request}") + print(f"Traffic Matrix: {tm}") - solution = TESolver(graph, connection_request).solve() + solution = TESolver(graph, tm).solve() print(f"TESolver result: {solution}") self.assertIsInstance(solution, ConnectionSolution) self.assertEqual(solution.cost, 5.0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown( + solution, self.connection_request + ) print(f"Breakdown: {breakdown}") self.assertIsNotNone(breakdown) def test_computation_breakdown_many_topologies(self): for topology_file in self.TOPOLOGY_FILE_LIST: print(f"Adding Topology: {topology_file}") - with open(topology_file, "r", encoding="utf-8") as data_file: - data = json.load(data_file) - self.temanager.topology_manager.add_topology(data) + data = json.loads(topology_file.read_text()) + self.temanager.topology_manager.add_topology(data) graph = self.temanager.generate_graph_te() print(f"Graph: {graph}") - connection_request = self.temanager.generate_connection_te() - print(f"Connection Request: {connection_request}") + tm = self.temanager.generate_traffic_matrix(self.connection_request) + print(f"Traffic matrix: {tm}") - conn = 
self.temanager.requests_connectivity(connection_request) + conn = self.temanager.requests_connectivity(tm) print(f"Graph connectivity: {conn}") - solution = TESolver(graph, connection_request).solve() + solution = TESolver(graph, tm).solve() print(f"TESolver result: {solution}") self.assertIsNotNone(solution.connection_map) self.assertEqual(solution.cost, 5.0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown( + solution, self.connection_request + ) print(f"Breakdown: {breakdown}") self.assertIsNotNone(breakdown) def test_computation_update(self): for topology_file in self.TOPOLOGY_FILE_LIST: print(f"Adding Topology: {topology_file}") - with open(topology_file, "r", encoding="utf-8") as data_file: - data = json.load(data_file) - self.temanager.add_topology(data) + data = json.loads(topology_file.read_text()) + self.temanager.add_topology(data) for topology_file in self.TOPOLOGY_FILE_LIST_UPDATE: print(f"Updating Topology: {topology_file}") - with open(topology_file, "r", encoding="utf-8") as data_file: - data = json.load(data_file) - self.temanager.update_topology(data) + data = json.loads(topology_file.read_text()) + self.temanager.update_topology(data) graph = self.temanager.generate_graph_te() - connection_request = self.temanager.generate_connection_te() + tm = self.temanager.generate_traffic_matrix(self.connection_request) - conn = self.temanager.requests_connectivity(connection_request) + conn = self.temanager.requests_connectivity(tm) print(f"Graph connectivity: {conn}") - solution = TESolver(graph, connection_request).solve() + solution = TESolver(graph, tm).solve() print(f"TESolver result: {solution}") self.assertIsNotNone(solution.connection_map) self.assertEqual(solution.cost, 5.0) - breakdown = self.temanager.generate_connection_breakdown(solution) + breakdown = self.temanager.generate_connection_breakdown( + solution, self.connection_request + ) print(f"Breakdown: {breakdown}") self.assertIsNotNone(breakdown) diff --git a/tests/test_topology_graph.py b/tests/test_topology_graph.py index 02ac03b7..b256421a 100644 --- a/tests/test_topology_graph.py +++ b/tests/test_topology_graph.py @@ -1,10 +1,10 @@ -import pathlib import unittest import matplotlib.pyplot as plt import networkx as nx +from sdx_datamodel.parsing.topologyhandler import TopologyHandler -from sdx.pce.topology.manager import TopologyManager +from sdx_pce.topology.manager import TopologyManager from . import TestData @@ -18,10 +18,9 @@ def __write_graph(self, infile, outfile): """ Write graph images of a given topology file. """ - topology_manager = TopologyManager() - topology_handler = topology_manager.topology_handler + topology = TopologyHandler().import_topology(infile) - topology = topology_handler.import_topology(infile) + topology_manager = TopologyManager() topology_manager.set_topology(topology) graph = topology_manager.generate_graph() diff --git a/tests/test_topology_grenmlconverter.py b/tests/test_topology_grenmlconverter.py index 1e10c395..87e4954f 100644 --- a/tests/test_topology_grenmlconverter.py +++ b/tests/test_topology_grenmlconverter.py @@ -1,8 +1,9 @@ -import pathlib import unittest -from sdx.pce.topology.grenmlconverter import GrenmlConverter -from sdx.pce.topology.manager import TopologyManager +from sdx_datamodel.parsing.topologyhandler import TopologyHandler + +from sdx_pce.topology.grenmlconverter import GrenmlConverter +from sdx_pce.topology.manager import TopologyManager from . 
import TestData @@ -22,14 +23,12 @@ def tearDown(self): pass def test_grenml_converter_amlight(self): - manager = TopologyManager() + TopologyManager() # TODO: this does not raise errors when it should (such as # when the input file is not present). Make the necessary # change in datamodel's TopologyHandler class. - topology = manager.topology_handler.import_topology( - TestData.TOPOLOGY_FILE_AMLIGHT - ) + topology = TopologyHandler().import_topology(TestData.TOPOLOGY_FILE_AMLIGHT) print(f"Topology: {topology}") self.assertIsNotNone(topology, "No topology could be read") diff --git a/tests/test_topology_manager.py b/tests/test_topology_manager.py index 747bb8e5..e4353aaa 100644 --- a/tests/test_topology_manager.py +++ b/tests/test_topology_manager.py @@ -5,8 +5,8 @@ import matplotlib.pyplot as plt import networkx as nx -from sdx.pce.topology.grenmlconverter import GrenmlConverter -from sdx.pce.topology.manager import TopologyManager +from sdx_pce.topology.grenmlconverter import GrenmlConverter +from sdx_pce.topology.manager import TopologyManager from . import TestData @@ -27,8 +27,8 @@ class TopologyManagerTests(unittest.TestCase): ] TOPOLOGY_FILE_LIST_UPDATE = [TestData.TOPOLOGY_FILE_ZAOXI] - LINK_ID = "urn:ogf:network:sdx:link:amlight:A1-B2" - INTER_LINK_ID = "urn:ogf:network:sdx:link:nni:Miami-Sanpaolo" + LINK_ID = "urn:sdx:link:amlight:A1-B2" + INTER_LINK_ID = "urn:sdx:link:nni:Miami-Sanpaolo" def setUp(self): self.topology_manager = TopologyManager() @@ -41,14 +41,17 @@ def test_merge_topology(self): for topology_file in self.TOPOLOGY_FILE_LIST: print(f"Adding Topology file: {topology_file}") - with open(topology_file, "r", encoding="utf-8") as infile: - self.topology_manager.add_topology(json.load(infile)) + topology_data = json.loads(pathlib.Path(topology_file).read_text()) + self.topology_manager.add_topology(topology_data) - self.assertIsInstance(self.topology_manager.topology.to_dict(), dict) + topology = self.topology_manager.get_topology() + + self.assertIsInstance(topology.to_dict(), dict) print(f"Writing result to {self.TOPOLOGY_OUT}") - with open(self.TOPOLOGY_OUT, "w") as outfile: - json.dump(self.topology_manager.topology.to_dict(), outfile, indent=4) + pathlib.Path(self.TOPOLOGY_OUT).write_text( + json.dumps(topology.to_dict(), indent=4) + ) def test_update_topology(self): print("Test Topology Update!") @@ -57,13 +60,16 @@ def test_update_topology(self): for topology_file in self.TOPOLOGY_FILE_LIST_UPDATE: print(f"Updating topology: {topology_file}") - with open(topology_file, "r", encoding="utf-8") as infile: - self.topology_manager.update_topology(json.load(infile)) + topology_data = json.loads(pathlib.Path(topology_file).read_text()) + self.topology_manager.add_topology(topology_data) + + topology = self.topology_manager.get_topology() - self.assertIsInstance(self.topology_manager.topology.to_dict(), dict) + self.assertIsInstance(topology.to_dict(), dict) - with open(self.TOPOLOGY_OUT, "w") as outfile: - json.dump(self.topology_manager.topology.to_dict(), outfile, indent=4) + pathlib.Path(self.TOPOLOGY_OUT).write_text( + json.dumps(topology.to_dict(), indent=4) + ) graph = self.topology_manager.generate_graph() # pos = nx.spring_layout(graph, seed=225) # Seed for reproducible layout @@ -95,24 +101,26 @@ def test_linkproperty_update(self): self.topology_manager.update_link_property(self.LINK_ID, "latency", 8) self.topology_manager.update_link_property(self.INTER_LINK_ID, "latency", 8) - self.assertIsInstance(self.topology_manager.topology.to_dict(), dict) + topology = 
self.topology_manager.get_topology() + + self.assertIsInstance(topology.to_dict(), dict) - with open(self.TOPOLOGY_OUT, "w") as outfile: - json.dump(self.topology_manager.topology.to_dict(), outfile, indent=4) + pathlib.Path(self.TOPOLOGY_OUT).write_text( + json.dumps(topology.to_dict(), indent=4) + ) def test_link_property_update_json(self): print("Test Topology JSON Link Property Update!") - with open(self.TOPOLOGY_IN, "r", encoding="utf-8") as infile: - data = json.load(infile) - self.topology_manager.update_element_property_json( - data, "links", self.LINK_ID, "latency", 20 - ) + topology_data = json.loads(pathlib.Path(self.TOPOLOGY_IN).read_text()) + + self.topology_manager.update_element_property_json( + topology_data, "links", self.LINK_ID, "latency", 20 + ) - self.assertIsInstance(data, dict) + self.assertIsInstance(topology_data, dict) - with open(self.TOPOLOGY_OUT, "w") as outfile: - json.dump(data, outfile, indent=4) + pathlib.Path(self.TOPOLOGY_OUT).write_text(json.dumps(topology_data, indent=4)) def test_get_domain_name(self): """ @@ -120,12 +128,12 @@ def test_get_domain_name(self): """ for topology_file in self.TOPOLOGY_FILE_LIST: print(f"Adding Topology file: {topology_file}") - with open(topology_file, "r", encoding="utf-8") as infile: - self.topology_manager.add_topology(json.load(infile)) + topology_data = json.loads(pathlib.Path(topology_file).read_text()) + self.topology_manager.add_topology(topology_data) topology = self.topology_manager.get_topology() - for node in topology.get_nodes(): + for node in topology.nodes: topology_id = self.topology_manager.get_domain_name(node.id) if node.id in ( @@ -133,24 +141,22 @@ def test_get_domain_name(self): "urn:sdx:node:amlight.net:B1", "urn:sdx:node:amlight.net:B2", ): - self.assertEqual( - topology_id, "urn:ogf:network:sdx:topology:amlight.net" - ) + self.assertEqual(topology_id, "urn:sdx:topology:amlight.net") if node.id in ( - "urn:ogf:network:sdx:node:sax:A1", - "urn:ogf:network:sdx:node:sax:B1", - "urn:ogf:network:sdx:node:sax:B2", - "urn:ogf:network:sdx:node:sax:B3", + "urn:sdx:node:sax:A1", + "urn:sdx:node:sax:B1", + "urn:sdx:node:sax:B2", + "urn:sdx:node:sax:B3", ): - self.assertEqual(topology_id, "urn:ogf:network:sdx:topology:sax.net") + self.assertEqual(topology_id, "urn:sdx:topology:sax.net") if node.id in ( - "urn:ogf:network:sdx:node:zaoxi:A1", - "urn:ogf:network:sdx:node:zaoxi:B1", - "urn:ogf:network:sdx:node:zaoxi:B2", + "urn:sdx:node:zaoxi:A1", + "urn:sdx:node:zaoxi:B1", + "urn:sdx:node:zaoxi:B2", ): - self.assertEqual(topology_id, "urn:ogf:network:sdx:topology:zaoxi.net") + self.assertEqual(topology_id, "urn:sdx:topology:zaoxi.net") if __name__ == "__main__": diff --git a/tox.ini b/tox.ini index 95c2f0a2..0f912c41 100644 --- a/tox.ini +++ b/tox.ini @@ -11,17 +11,27 @@ requires = tox>=4 [testenv] -description = run the tests with pytest +description = run tests with pytest package = wheel wheel_build_env = .pkg deps = - pytest>=6 - pytest-cov + [test] commands = pytest {tty:--color=yes} {posargs} [testenv:extras] -description = run tests, with optional dependencies installed. -extras = - pygraphviz +description = run tests, with extra dependencies installed. +extras = + pygraphviz + +[testenv:lint] +description = Run code checkers +skip_install = True +deps = + [lint] + +commands = + ruff check {posargs:.} + black --check {posargs:.} + isort --check {posargs:.}
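
With the `lint` environment added above, the same style checks that CI runs can also be reproduced locally through tox. A minimal invocation might look like the following sketch, assuming tox >= 4 is installed (as the `[tox]` section requires) and that the project's `test` and `lint` extras actually provide pytest, ruff, black, and isort as the environments above expect:

```
# Run the default test environment ([testenv] above runs pytest).
tox

# Run the new lint environment: ruff check, black --check, isort --check.
tox -e lint

# Run the test suite with the optional pygraphviz extra installed.
tox -e extras
```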