From 3db5f1cbc304028079e0c1340dbc38fea4fb6c88 Mon Sep 17 00:00:00 2001 From: Olga <86965961+omrozowicz-splunk@users.noreply.github.com> Date: Tue, 4 Apr 2023 11:48:10 +0200 Subject: [PATCH] feat: conditional profiles (#726) * build(pip): (deps-dev): bump pytest from 7.1.2 to 7.2.1 (#684) Bumps [pytest](https://github.com/pytest-dev/pytest) from 7.1.2 to 7.2.1. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/7.1.2...7.2.1) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: update poetry version and generate new lock file (#698) * chore: add missing setuptools for mike (#699) * chore: add gnome-keyring (#700) * fix: refactor varBinds mechanism and implement compound indexes (#704) * fix: refactor varBinds mechanism and implement compound indexes * fix: add docstrings and fix unit tests * fix: mapping method for profiles * fix: looking for a parent in a map * fix: run pre-commit * fix: bring back the previous behavior of IF-MIB and SNMPv2-MIB during walk profile operation * chore(release): 1.8.7-beta.1 ## [1.8.7-beta.1](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.6...v1.8.7-beta.1) (2023-02-23) ### Bug Fixes * refactor varBinds mechanism and implement compound indexes ([#704](https://github.com/splunk/splunk-connect-for-snmp/issues/704)) ([f4007f4](https://github.com/splunk/splunk-connect-for-snmp/commit/f4007f411eb93d64a06c385d01ebd03f5d9a63db)) * fix: cleanup helm structure different traps and polling (#688) * fix: helm cleanup, add more render manifests, cleanup values.yaml file, document configuration * fix: run make render and update Makefile with render_manifests.sh * fix: delete loggers, run 
pre-commit * fix: selector labels * fix: selector labels for yaml cm used by scheduler * fix: update documentation * fix: add polling and trap configuration to the offline and lightweight examples --------- Co-authored-by: semantic-release-bot Co-authored-by: Wojciech Zyla * chore(release): 1.8.7-beta.2 ## [1.8.7-beta.2](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.1...v1.8.7-beta.2) (2023-02-28) ### Bug Fixes * cleanup helm structure different traps and polling ([#688](https://github.com/splunk/splunk-connect-for-snmp/issues/688)) ([86957f9](https://github.com/splunk/splunk-connect-for-snmp/commit/86957f99f730f562afe8f9ffb08411b62a1d1151)) * fix: conditional profiles (#713) * fix: implement conditional profiles feature, add unit tests, refactor adding to assigned_profiles listm improve varBinds validation, change tagging conditional profiles in mongo, add documentation * fix: apply pre-commit * fix: documentation * chore: update CHANGELOG.md * chore(release): 1.8.7-beta.3 ## [1.8.7-beta.3](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.2...v1.8.7-beta.3) (2023-03-17) ### Bug Fixes * conditional profiles ([#713](https://github.com/splunk/splunk-connect-for-snmp/issues/713)) ([7d8d5fc](https://github.com/splunk/splunk-connect-for-snmp/commit/7d8d5fc652a184646da00553f033f9515b4a6ceb)) * fix: do not poll IF-MIB by default (#714) * fix: do not poll IF-MIB by default, add NOTE about IF-MIB not being polled by default * fix: update CHANGELOG and add unit test * fix: unit test * chore(release): 1.8.7-beta.4 ## [1.8.7-beta.4](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.3...v1.8.7-beta.4) (2023-03-21) ### Bug Fixes * do not poll IF-MIB by default ([#714](https://github.com/splunk/splunk-connect-for-snmp/issues/714)) ([3d2a1f6](https://github.com/splunk/splunk-connect-for-snmp/commit/3d2a1f66d8f607d7e913e6e2d2a745066c72b153)) * fix: tooBig error (#717) * fix: tooBig error * chore(release): 1.8.7-beta.5 
## [1.8.7-beta.5](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.4...v1.8.7-beta.5) (2023-03-24) ### Bug Fixes * tooBig error ([#717](https://github.com/splunk/splunk-connect-for-snmp/issues/717)) ([d964746](https://github.com/splunk/splunk-connect-for-snmp/commit/d96474685a21a416be649faf1390097619a56930)) * test: adding syntax from poetry >= 1.2 (#716) * test: adding syntax from poetry >= 1.2 * fix: treating not existing group name as a hostname (#721) * chore(release): 1.8.7-beta.6 ## [1.8.7-beta.6](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.5...v1.8.7-beta.6) (2023-03-28) ### Bug Fixes * treating not existing group name as a hostname ([#721](https://github.com/splunk/splunk-connect-for-snmp/issues/721)) ([5e46474](https://github.com/splunk/splunk-connect-for-snmp/commit/5e46474607c523e6669563ece2cc0b6bf6416a72)) * fix: prepare release (#722) * doc: improve documentation * chore: improve GH workflow permissions * doc: update docs, delete outdated informations * chore(release): 1.8.7-beta.7 ## [1.8.7-beta.7](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.6...v1.8.7-beta.7) (2023-04-03) ### Bug Fixes * prepare release ([#722](https://github.com/splunk/splunk-connect-for-snmp/issues/722)) ([b169d5d](https://github.com/splunk/splunk-connect-for-snmp/commit/b169d5d98aa983f436e3f6eaaa19f9c1e20bff9c)) * fix: workflow permissions (#724) * chore(release): 1.8.7-beta.8 ## [1.8.7-beta.8](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.7...v1.8.7-beta.8) (2023-04-03) ### Bug Fixes * workflow permissions ([#724](https://github.com/splunk/splunk-connect-for-snmp/issues/724)) ([7ffbc35](https://github.com/splunk/splunk-connect-for-snmp/commit/7ffbc3578a932705511acb82feb9c71ba6d6bdc4)) * fix: GitHub actions Helm permissions (#725) * fix: workflow permissions * chore(release): 1.8.7-beta.9 ## 
[1.8.7-beta.9](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.8...v1.8.7-beta.9) (2023-04-03) ### Bug Fixes * GitHub actions Helm permissions ([#725](https://github.com/splunk/splunk-connect-for-snmp/issues/725)) ([dec2fef](https://github.com/splunk/splunk-connect-for-snmp/commit/dec2fefc61397c99fb090bc3098ba872e24ac330)) * doc: minor enhancements (#727) * doc: shorten step by step polling * doc: add search option to the documentation and update values.yaml with the new profiles * doc: add logLevel to the main values.yaml, run make render * doc: change values.yaml lint * doc: fix title (#728) * feat: prepare release, modify changelog (#729) * chore(release): 1.9.0-beta.1 # [1.9.0-beta.1](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.8.7-beta.9...v1.9.0-beta.1) (2023-04-04) ### Features * prepare release, modify changelog ([#729](https://github.com/splunk/splunk-connect-for-snmp/issues/729)) ([a373477](https://github.com/splunk/splunk-connect-for-snmp/commit/a373477dac7119bd95a7a0038ea3d94e0ae73396)) --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: semantic-release-bot Co-authored-by: Wojciech Zyla Co-authored-by: Lukasz Loboda <76950960+uoboda-splunk@users.noreply.github.com> --- .github/workflows/agreements.yaml | 3 + .github/workflows/cd-pages.yaml | 6 + .github/workflows/ci-main.yaml | 2 +- .github/workflows/ci-release-pr.yaml | 3 + .github/workflows/ci-release.yaml | 3 + .github/workflows/mike.yaml | 4 +- .github/workflows/offline-installation.yaml | 3 + .github/workflows/release-notes.yaml | 3 + CHANGELOG.md | 12 + Makefile | 2 + README.md | 2 - charts/splunk-connect-for-snmp/Chart.yaml | 4 +- .../templates/NOTES.txt | 11 +- .../templates/_helpers.tpl | 78 ++++ .../templates/common/scheduler-inventory.yaml | 2 + .../templates/common/traps-config.yaml | 2 + .../templates/inventory/_helpers.tpl | 24 +- .../templates/inventory/job.yaml 
| 4 +- .../templates/scheduler/_helpers.tpl | 25 +- .../templates/scheduler/deployment.yaml | 20 +- .../templates/scheduler/hpa.yaml | 28 -- .../templates/scheduler/networkpolicy.yaml | 2 +- .../templates/scheduler/pdb.yaml | 2 + .../templates/scheduler/serviceaccount.yaml | 12 - .../templates/serviceaccount.yaml | 12 + .../templates/sim/_helpers.tpl | 23 +- .../templates/sim/deployment.yaml | 13 +- .../templates/sim/deprecated_hpa.yaml | 28 ++ .../templates/sim/hpa.yaml | 10 +- .../templates/sim/ingress.yaml | 61 --- .../templates/sim/networkpolicy.yaml | 15 - .../templates/sim/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 2 +- .../templates/traps/_helpers.tpl | 25 +- .../templates/traps/deployment.yaml | 16 +- .../templates/traps/deprecated_hpa.yaml | 28 ++ .../templates/traps/hpa.yaml | 12 +- .../templates/traps/ingress.yaml | 61 --- .../templates/traps/networkpolicy.yaml | 2 +- .../templates/traps/pdb.yaml | 2 + .../templates/traps/service.yaml | 2 + .../templates/traps/serviceaccount.yaml | 12 - .../templates/worker/_helpers.tpl | 55 +-- .../templates/worker/poller/deployment.yaml | 16 +- .../worker/poller/deprecated_hpa.yaml | 22 + .../templates/worker/poller/hpa.yaml | 2 +- .../templates/worker/sender/deployment.yaml | 12 +- .../worker/sender/deprecated_hpa.yaml | 22 + .../templates/worker/sender/hpa.yaml | 2 +- .../templates/worker/serviceaccount.yaml | 12 - .../templates/worker/trap/deployment.yaml | 16 +- .../templates/worker/trap/deprecated_hpa.yaml | 22 + .../templates/worker/trap/hpa.yaml | 2 +- charts/splunk-connect-for-snmp/values.yaml | 425 ++++++++++-------- docs/configuration/configuring-profiles.md | 64 ++- docs/configuration/poller-configuration.md | 22 +- docs/configuration/step-by-step-poll.md | 75 +++- docs/gettingstarted/sc4snmp-installation.md | 242 ++++------ docs/mib-request.md | 4 +- docs/offlineinstallation/offline-sc4snmp.md | 102 +---- docs/small-environment.md | 133 +----- examples/basic_template.md | 91 ++++ 
examples/lightweight_installation.yaml | 87 ++++ examples/o11y_values.yaml | 45 ++ examples/offline_installation_values.md | 64 +++ examples/polling_and_traps_v3.yaml | 31 ++ examples/polling_groups_values.yaml | 25 ++ examples/polling_values.yaml | 52 +++ examples/traps_enabled_values.yaml | 13 + integration_tests/automatic_setup.sh | 8 +- mkdocs.yml | 14 +- poetry.lock | 133 +++--- pyproject.toml | 8 +- render_manifests.sh | 10 + .../templates/common/scheduler-config.yaml | 4 +- .../templates/common/scheduler-inventory.yaml | 4 +- .../templates/common/splunk-secret.yaml | 9 + .../templates/common/traps-config.yaml | 4 +- .../templates/inventory/job.yaml | 6 +- .../templates/scheduler/deployment.yaml | 14 +- .../templates/scheduler/pdb.yaml | 4 +- .../templates/scheduler/serviceaccount.yaml | 12 - .../templates/serviceaccount.yaml | 10 + .../templates/sim/pdb.yaml | 4 +- .../templates/tests/test-connection.yaml | 6 +- .../templates/traps/deployment.yaml | 12 +- .../templates/traps/pdb.yaml | 4 +- .../templates/traps/service.yaml | 4 +- .../templates/traps/serviceaccount.yaml | 12 - .../templates/worker/pdb.yaml | 4 +- .../templates/worker/poller/deployment.yaml | 20 +- .../templates/worker/sender/deployment.yaml | 20 +- .../templates/worker/serviceaccount.yaml | 12 - .../templates/worker/trap/deployment.yaml | 20 +- rendered/values_autoscaling_enabled.yaml | 51 +++ ...values_autoscaling_enabled_deprecated.yaml | 52 +++ rendered/values_only_polling.yaml | 32 ++ rendered/values_only_traps.yaml | 14 + splunk_connect_for_snmp/__init__.py | 2 +- .../common/custom_translations.py | 2 + .../common/inventory_processor.py | 3 +- splunk_connect_for_snmp/enrich/tasks.py | 6 +- splunk_connect_for_snmp/inventory/tasks.py | 164 ++++++- splunk_connect_for_snmp/profiles/base.yaml | 4 +- splunk_connect_for_snmp/snmp/manager.py | 169 +++---- .../snmp/varbinds_resolver.py | 281 ++++++++++++ splunk_connect_for_snmp/splunk/tasks.py | 5 +- test/common/base_profiles/base.yaml | 2 +- 
.../base_profiles/runtime_config_enabled.yaml | 2 +- test/common/test_custom_translations.py | 9 +- test/common/test_inventory_processor.py | 12 +- test/common/test_profiles.py | 6 +- test/inventory/test_assign_profiles.py | 124 ++++- test/inventory/test_conditional_profiles.py | 236 ++++++++++ test/inventory/test_inventory_setup_poller.py | 131 +++++- test/snmp/test_do_work.py | 20 +- test/snmp/test_get_varbinds.py | 183 ++++++-- test/snmp/test_process_snmp_data.py | 41 +- test/snmp/test_utils.py | 42 +- test/snmp/test_varbinds_resolver.py | 187 ++++++++ test/splunk/test_prepare.py | 3 +- 121 files changed, 3003 insertions(+), 1389 deletions(-) delete mode 100644 charts/splunk-connect-for-snmp/templates/scheduler/hpa.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml create mode 100644 charts/splunk-connect-for-snmp/templates/serviceaccount.yaml create mode 100644 charts/splunk-connect-for-snmp/templates/sim/deprecated_hpa.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/sim/ingress.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/sim/networkpolicy.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/sim/serviceaccount.yaml create mode 100644 charts/splunk-connect-for-snmp/templates/traps/deprecated_hpa.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/traps/ingress.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml create mode 100644 charts/splunk-connect-for-snmp/templates/worker/poller/deprecated_hpa.yaml create mode 100644 charts/splunk-connect-for-snmp/templates/worker/sender/deprecated_hpa.yaml delete mode 100644 charts/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml create mode 100644 charts/splunk-connect-for-snmp/templates/worker/trap/deprecated_hpa.yaml create mode 100644 examples/basic_template.md create mode 100644 examples/lightweight_installation.yaml create mode 100644 examples/o11y_values.yaml 
create mode 100644 examples/offline_installation_values.md create mode 100644 examples/polling_and_traps_v3.yaml create mode 100644 examples/polling_groups_values.yaml create mode 100644 examples/polling_values.yaml create mode 100644 examples/traps_enabled_values.yaml create mode 100755 render_manifests.sh create mode 100644 rendered/manifests/tests/splunk-connect-for-snmp/templates/common/splunk-secret.yaml delete mode 100644 rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml create mode 100644 rendered/manifests/tests/splunk-connect-for-snmp/templates/serviceaccount.yaml delete mode 100644 rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml delete mode 100644 rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml create mode 100644 rendered/values_autoscaling_enabled.yaml create mode 100644 rendered/values_autoscaling_enabled_deprecated.yaml create mode 100644 rendered/values_only_polling.yaml create mode 100644 rendered/values_only_traps.yaml create mode 100644 splunk_connect_for_snmp/snmp/varbinds_resolver.py create mode 100644 test/inventory/test_conditional_profiles.py create mode 100644 test/snmp/test_varbinds_resolver.py diff --git a/.github/workflows/agreements.yaml b/.github/workflows/agreements.yaml index 572b02d86..cd4bf95c8 100644 --- a/.github/workflows/agreements.yaml +++ b/.github/workflows/agreements.yaml @@ -8,6 +8,9 @@ on: jobs: call-workflow-agreements: uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1.2.1 + permissions: + contents: read + packages: write secrets: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} PERSONAL_ACCESS_TOKEN: ${{ secrets.PAT_CLATOOL }} diff --git a/.github/workflows/cd-pages.yaml b/.github/workflows/cd-pages.yaml index aab4a4cd8..c8fce7da2 100644 --- a/.github/workflows/cd-pages.yaml +++ b/.github/workflows/cd-pages.yaml @@ -11,6 +11,9 @@ jobs: mike: name: Release runs-on: ubuntu-latest + 
permissions: + contents: read + packages: write steps: - name: Checkout uses: actions/checkout@v2 @@ -70,6 +73,9 @@ jobs: # fi helm: runs-on: ubuntu-latest + permissions: + contents: write + packages: write needs: - mike steps: diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index 2457baa09..e3a0d41ce 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -136,7 +136,7 @@ jobs: if: "contains(needs.integration-tests-check.outputs.commit_message, '[run-int-tests]')" steps: - uses: actions/checkout@v2 - - name: Archieve repository + - name: Archive repository working-directory: /home/runner/work/splunk-connect-for-snmp run: tar -czf splunk-connect-for-snmp.tgz splunk-connect-for-snmp - name: Copy archived repository to scripts directory diff --git a/.github/workflows/ci-release-pr.yaml b/.github/workflows/ci-release-pr.yaml index ca050662e..a7c5d9df2 100644 --- a/.github/workflows/ci-release-pr.yaml +++ b/.github/workflows/ci-release-pr.yaml @@ -27,6 +27,9 @@ jobs: release: name: Release runs-on: ubuntu-latest + permissions: + contents: read + packages: write steps: - uses: actions/checkout@v2 with: diff --git a/.github/workflows/ci-release.yaml b/.github/workflows/ci-release.yaml index 7ff6fd584..80fa527d2 100644 --- a/.github/workflows/ci-release.yaml +++ b/.github/workflows/ci-release.yaml @@ -28,6 +28,9 @@ jobs: release: name: Release runs-on: ubuntu-latest + permissions: + contents: read + packages: write steps: - uses: actions/checkout@v2 with: diff --git a/.github/workflows/mike.yaml b/.github/workflows/mike.yaml index 5f500df37..e3da4ed86 100644 --- a/.github/workflows/mike.yaml +++ b/.github/workflows/mike.yaml @@ -37,9 +37,11 @@ jobs: - name: Upload Docs run: | + sudo apt update + sudo apt install gnome-keyring BRANCH=$(echo $GITHUB_REF | cut -d / -f 3) echo $BRANCH - pip3 install poetry=="1.1.15" + pip3 install poetry=="1.2.2" poetry install poetry run pip install 'setuptools==65.6.3' poetry run mike 
deploy -p $BRANCH diff --git a/.github/workflows/offline-installation.yaml b/.github/workflows/offline-installation.yaml index 564789291..f65caf43d 100644 --- a/.github/workflows/offline-installation.yaml +++ b/.github/workflows/offline-installation.yaml @@ -8,6 +8,9 @@ jobs: build: name: Build runs-on: ubuntu-latest + permissions: + contents: write + packages: write steps: - name: Check out code uses: actions/checkout@v1 diff --git a/.github/workflows/release-notes.yaml b/.github/workflows/release-notes.yaml index dfbdeafc2..bf0926b4c 100644 --- a/.github/workflows/release-notes.yaml +++ b/.github/workflows/release-notes.yaml @@ -7,6 +7,9 @@ on: jobs: call-workflow-preview: + permissions: + contents: write + packages: write uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-release-notes.yaml@v1.2.1 secrets: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 7cde7c22f..be205f27a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ ## Unreleased +## [1.9.0] + +### Changed +- add possibility to poll compound indexes (more than one value, ex. 
`['IP-MIB', 'ipAddressStatus', 'ipv4', '172.31.27.144']`) +- add option to automatically poll SNMP objects based on provided conditions with conditional profiles +- remove IF-MIB from the scope of the default small walk + +### Fixed +- possibility to use hostname instead of the bare ip address in polling +- getting rid off `An error of SNMP isWalk=False for a host 54.91.99.113 occurred: tooBig at ?` with limiting maximum +number of varBinds polled at once `maxOidToProcess` + ## [1.8.6] ### Changed diff --git a/Makefile b/Makefile index 5f9a9430d..6edea1980 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,6 @@ .PHONY: render render: + rm -rf rendered/manifests helm template -n default --values rendered/values.yaml --output-dir rendered/manifests/tests charts/splunk-connect-for-snmp rm -rf rendered/manifests/tests/splunk-connect-for-snmp/charts + ./render_manifests.sh diff --git a/README.md b/README.md index 4bef3fe75..65ace964d 100644 --- a/README.md +++ b/README.md @@ -15,5 +15,3 @@ There are plenty of versions you can browse: # Contact Feel free to contact us via [#splunk-connect-for-snmp](https://splunk-usergroups.slack.com/archives/C01K4V86WV7) slack channel. - -# diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml index 7daaaaeda..01a033173 100644 --- a/charts/splunk-connect-for-snmp/Chart.yaml +++ b/charts/splunk-connect-for-snmp/Chart.yaml @@ -14,12 +14,12 @@ type: application # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.8.6 +version: 1.9.0-beta.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.8.6" +appVersion: "1.9.0-beta.1" # dependencies: - name: mongodb diff --git a/charts/splunk-connect-for-snmp/templates/NOTES.txt b/charts/splunk-connect-for-snmp/templates/NOTES.txt index 1c51b4d46..61e1a99d2 100644 --- a/charts/splunk-connect-for-snmp/templates/NOTES.txt +++ b/charts/splunk-connect-for-snmp/templates/NOTES.txt @@ -1,9 +1,2 @@ -Version 1.7 of SC4SNMP add new feature which enables horizontal worker pods autoscaling, in order to use it you will need to turn on microk8s metrics-server addon: - -microk8s enable metrics-server - -and you should also update worker configuration in values.yaml file according to the documentation: -https://splunk.github.io/splunk-connect-for-snmp/main/configuration/worker-configuration - -values.yaml template is available here: -https://splunk.github.io/splunk-connect-for-snmp/main/gettingstarted/sc4snmp-installation \ No newline at end of file +Walk profiles no longer include IF-MIB family by default. +If you've used this functionality before, please update the walk profile with ['IF-MIB'] varBind. \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/_helpers.tpl index 894b748ac..435a03bec 100644 --- a/charts/splunk-connect-for-snmp/templates/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/_helpers.tpl @@ -14,6 +14,84 @@ {{- printf "redis://%s-redis-headless:6379/1" .Release.Name }} {{- end }} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "splunk-connect-for-snmp.fullname" -}} +{{- if .Values.worker.fullnameOverride }} +{{- .Values.worker.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default (printf "%s-%s" .Chart.Name "user") .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "splunk-connect-for-snmp.selectorLabels" -}} +app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "splunk-connect-for-snmp.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "splunk-connect-for-snmp.labels" -}} +helm.sh/chart: {{ include "splunk-connect-for-snmp.chart" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "splunk-connect-for-snmp.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "splunk-connect-for-snmp.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + {{- define "splunk-connect-for-snmp.name" -}} {{- default (printf "%s" .Chart.Name ) .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} + + +{{/* +Whether enable traps +*/}} +{{- define "splunk-connect-for-snmp.traps.enable" -}} +{{- if or (and (eq .Values.traps.service.type "LoadBalancer") .Values.traps.loadBalancerIP ) (and (eq .Values.traps.service.type "NodePort") .Values.traps.service.nodePort) }} +{{- printf "true" }} +{{- else }} +{{- printf "false" }} +{{- end -}} +{{- end }} + +{{/* +Whether enable polling +*/}} +{{- define "splunk-connect-for-snmp.polling.enable" -}} +{{- if .Values.poller.inventory }} +{{- printf "true" }} +{{- else }} +{{- printf "false" }} +{{- end -}} +{{- end }} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml b/charts/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml index 926e08bf7..8da25375b 100644 --- a/charts/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml +++ b/charts/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.polling.enable" .) "true" }} apiVersion: v1 kind: ConfigMap metadata: @@ -11,3 +12,4 @@ data: {{ else }} address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete {{- end }} +{{- end -}} diff --git a/charts/splunk-connect-for-snmp/templates/common/traps-config.yaml b/charts/splunk-connect-for-snmp/templates/common/traps-config.yaml index b0df4169c..91e80a8c7 100644 --- a/charts/splunk-connect-for-snmp/templates/common/traps-config.yaml +++ b/charts/splunk-connect-for-snmp/templates/common/traps-config.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.traps.enable" .) 
"true" }} apiVersion: v1 kind: ConfigMap metadata: @@ -14,3 +15,4 @@ data: usernameSecrets: {{- toYaml .Values.traps.usernameSecrets | nindent 8 }} {{ end }} +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/inventory/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/inventory/_helpers.tpl index da80a5d20..c8140787d 100644 --- a/charts/splunk-connect-for-snmp/templates/inventory/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/inventory/_helpers.tpl @@ -30,18 +30,6 @@ Create chart name and version as used by the chart label. {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* -Common labels -*/}} -{{- define "splunk-connect-for-snmp.inventory.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.inventory.chart" . }} -{{ include "splunk-connect-for-snmp.inventory.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - {{/* Selector labels */}} @@ -51,12 +39,10 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* -Create the name of the service account to use +Common labels */}} -{{- define "splunk-connect-for-snmp.inventory.serviceAccountName" -}} -{{- if .Values.inventory.serviceAccount.create }} -{{- default (include "splunk-connect-for-snmp.inventory.fullname" .) .Values.inventory.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.inventory.serviceAccount.name }} -{{- end }} +{{- define "splunk-connect-for-snmp.inventory.labels" -}} +{{ include "splunk-connect-for-snmp.inventory.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . 
}} {{- end }} + diff --git a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml index 1ecc81534..61935d6df 100644 --- a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.polling.enable" .) "true" }} apiVersion: batch/v1 kind: Job metadata: @@ -81,4 +82,5 @@ spec: emptyDir: {} - name: tmp emptyDir: {} - restartPolicy: OnFailure \ No newline at end of file + restartPolicy: OnFailure +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/scheduler/_helpers.tpl index e4e8f6f42..604e3fbe7 100644 --- a/charts/splunk-connect-for-snmp/templates/scheduler/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/scheduler/_helpers.tpl @@ -30,18 +30,6 @@ Create chart name and version as used by the chart label. {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* -Common labels -*/}} -{{- define "splunk-connect-for-snmp.scheduler.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.scheduler.chart" . }} -{{ include "splunk-connect-for-snmp.scheduler.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - {{/* Selector labels */}} @@ -51,12 +39,9 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* -Create the name of the service account to use +Common labels */}} -{{- define "splunk-connect-for-snmp.scheduler.serviceAccountName" -}} -{{- if .Values.scheduler.serviceAccount.create }} -{{- default (include "splunk-connect-for-snmp.scheduler.fullname" .) 
.Values.scheduler.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.scheduler.serviceAccount.name }} -{{- end }} -{{- end }} +{{- define "splunk-connect-for-snmp.scheduler.labels" -}} +{{ include "splunk-connect-for-snmp.scheduler.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . }} +{{- end }} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 2cc438407..6c3af3683 100644 --- a/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.polling.enable" .) "true" }} apiVersion: apps/v1 kind: Deployment metadata: @@ -5,9 +6,7 @@ metadata: labels: {{- include "splunk-connect-for-snmp.scheduler.labels" . | nindent 4 }} spec: - {{- if not .Values.scheduler.autoscaling.enabled }} - replicas: {{ .Values.scheduler.replicaCount }} - {{- end }} + replicas: 1 selector: matchLabels: {{- include "splunk-connect-for-snmp.scheduler.selectorLabels" . | nindent 6 }} @@ -24,13 +23,19 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.scheduler.serviceAccountName" . }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . 
}} securityContext: - {{- toYaml .Values.scheduler.podSecurityContext | nindent 8 }} + fsGroup: 10001 containers: - name: {{ .Chart.Name }}-scheduler securityContext: - {{- toYaml .Values.scheduler.securityContext | nindent 12 }} + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: @@ -108,4 +113,5 @@ spec: - name: pysnmp-cache-volume emptyDir: {} - name: tmp - emptyDir: {} \ No newline at end of file + emptyDir: {} +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/hpa.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/hpa.yaml deleted file mode 100644 index ba32139ae..000000000 --- a/charts/splunk-connect-for-snmp/templates/scheduler/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.scheduler.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "splunk-connect-for-snmp.scheduler.fullname" . }} - labels: - {{- include "splunk-connect-for-snmp.scheduler.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "splunk-connect-for-snmp.scheduler.fullname" . 
}} - minReplicas: {{ .Values.scheduler.autoscaling.minReplicas }} - maxReplicas: {{ .Values.scheduler.autoscaling.maxReplicas }} - metrics: - {{- if .Values.scheduler.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.scheduler.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.scheduler.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.scheduler.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/networkpolicy.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/networkpolicy.yaml index 521634278..9b8723f6f 100644 --- a/charts/splunk-connect-for-snmp/templates/scheduler/networkpolicy.yaml +++ b/charts/splunk-connect-for-snmp/templates/scheduler/networkpolicy.yaml @@ -1,4 +1,4 @@ -{{- if .Values.scheduler.networkPolicy -}} +{{- if and .Values.scheduler.networkPolicy ( eq (include "splunk-connect-for-snmp.polling.enable" .) "true" ) -}} apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/pdb.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/pdb.yaml index 621198bfe..0dcf1e793 100644 --- a/charts/splunk-connect-for-snmp/templates/scheduler/pdb.yaml +++ b/charts/splunk-connect-for-snmp/templates/scheduler/pdb.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.polling.enable" .) "true" }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: @@ -9,3 +10,4 @@ spec: selector: matchLabels: {{- include "splunk-connect-for-snmp.scheduler.selectorLabels" . 
| nindent 8 }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml deleted file mode 100644 index 296d77e69..000000000 --- a/charts/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.scheduler.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "splunk-connect-for-snmp.scheduler.serviceAccountName" . }} - labels: - {{- include "splunk-connect-for-snmp.scheduler.labels" . | nindent 4 }} - {{- with .Values.scheduler.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/serviceaccount.yaml b/charts/splunk-connect-for-snmp/templates/serviceaccount.yaml new file mode 100644 index 000000000..4fac9e10e --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "splunk-connect-for-snmp.serviceAccountName" . }} + labels: + {{- include "splunk-connect-for-snmp.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl index 4674d398c..115be8db2 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl @@ -30,18 +30,6 @@ Create chart name and version as used by the chart label. {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* -Common labels -*/}} -{{- define "splunk-connect-for-snmp.sim.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.sim.chart" . 
}} -{{ include "splunk-connect-for-snmp.sim.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - {{/* Selector labels */}} @@ -51,14 +39,11 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* -Create the name of the service account to use +Common labels */}} -{{- define "splunk-connect-for-snmp.sim.serviceAccountName" -}} -{{- if .Values.sim.serviceAccount.create }} -{{- default (include "splunk-connect-for-snmp.sim.fullname" .) .Values.sim.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.sim.serviceAccount.name }} -{{- end }} +{{- define "splunk-connect-for-snmp.sim.labels" -}} +{{ include "splunk-connect-for-snmp.sim.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . }} {{- end }} {{/* diff --git a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml index d975e0c5a..2123edf85 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml @@ -26,13 +26,20 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.sim.serviceAccountName" . }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . 
}} securityContext: - {{- toYaml .Values.sim.podSecurityContext | nindent 8 }} + fsGroup: 10001 containers: - name: {{ .Chart.Name }}-sim securityContext: - {{- toYaml .Values.sim.securityContext | nindent 12 }} + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 image: {{ .Values.sim.image | default "quay.io/signalfx/splunk-otel-collector" }}:{{ .Values.sim.tag | default "0.41.0" }} imagePullPolicy: {{ .Values.sim.pullPolicy | default "IfNotPresent" }} args: ["--config=/config/otel-collector-config.yaml"] diff --git a/charts/splunk-connect-for-snmp/templates/sim/deprecated_hpa.yaml b/charts/splunk-connect-for-snmp/templates/sim/deprecated_hpa.yaml new file mode 100644 index 000000000..816224a33 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/sim/deprecated_hpa.yaml @@ -0,0 +1,28 @@ +{{- if and ( eq ( toString .Values.sim.autoscaling.enabled ) "true" ) ( eq ( toString .Values.useDeprecatedAPI ) "true" ) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.sim.fullname" . }} + labels: + {{- include "splunk-connect-for-snmp.sim.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.sim.fullname" . 
}} + minReplicas: {{ .Values.sim.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sim.autoscaling.maxReplicas }} + metrics: + {{- if .Values.sim.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.sim.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.sim.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.sim.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/sim/hpa.yaml b/charts/splunk-connect-for-snmp/templates/sim/hpa.yaml index c83f31297..84192a20c 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/hpa.yaml +++ b/charts/splunk-connect-for-snmp/templates/sim/hpa.yaml @@ -1,5 +1,5 @@ -{{- if .Values.sim.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 +{{- if and (eq ( toString .Values.sim.autoscaling.enabled ) "true") ( eq ( toString .Values.useDeprecatedAPI ) "false" ) }} +apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: {{ include "splunk-connect-for-snmp.sim.fullname" . 
}} @@ -17,12 +17,16 @@ spec: - type: Resource resource: name: cpu - targetAverageUtilization: {{ .Values.sim.autoscaling.targetCPUUtilizationPercentage }} + target: + type: Utilization + averageUtilization: {{ .Values.sim.autoscaling.targetCPUUtilizationPercentage }} {{- end }} {{- if .Values.sim.autoscaling.targetMemoryUtilizationPercentage }} - type: Resource resource: name: memory - targetAverageUtilization: {{ .Values.sim.autoscaling.targetMemoryUtilizationPercentage }} + target: + type: Utilization + averageUtilization: {{ .Values.sim.autoscaling.targetMemoryUtilizationPercentage }} {{- end }} {{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/sim/ingress.yaml b/charts/splunk-connect-for-snmp/templates/sim/ingress.yaml deleted file mode 100644 index 4c0e7e0c9..000000000 --- a/charts/splunk-connect-for-snmp/templates/sim/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.sim.ingress.enabled -}} -{{- $fullName := include "splunk-connect-for-snmp.sim.fullname" . -}} -{{- $svcPort := .Values.sim.service.port -}} -{{- if and .Values.sim.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.sim.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.sim.ingress.annotations "kubernetes.io/ingress.class" .Values.sim.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "splunk-connect-for-snmp.sim.labels" . | nindent 4 }} - {{- with .Values.sim.ingress.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - {{- if and .Values.sim.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.sim.ingress.className }} - {{- end }} - {{- if .Values.sim.ingress.tls }} - tls: - {{- range .Values.sim.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.sim.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/sim/networkpolicy.yaml b/charts/splunk-connect-for-snmp/templates/sim/networkpolicy.yaml deleted file mode 100644 index 7c77a6124..000000000 --- a/charts/splunk-connect-for-snmp/templates/sim/networkpolicy.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.sim.networkPolicy -}} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "splunk-connect-for-snmp.sim.fullname" . }} - labels: - {{- include "splunk-connect-for-snmp.sim.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "splunk-connect-for-snmp.sim.selectorLabels" . 
| nindent 8 }} - policyTypes: - - Ingress - - Egress -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/sim/serviceaccount.yaml b/charts/splunk-connect-for-snmp/templates/sim/serviceaccount.yaml deleted file mode 100644 index fa3654893..000000000 --- a/charts/splunk-connect-for-snmp/templates/sim/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.sim.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "splunk-connect-for-snmp.sim.serviceAccountName" . }} - labels: - {{- include "splunk-connect-for-snmp.sim.labels" . | nindent 4 }} - {{- with .Values.sim.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 1f0f540e8..75660e398 100644 --- a/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -3,7 +3,7 @@ kind: Pod metadata: name: "{{ include "splunk-connect-for-snmp.traps.fullname" . }}-test-connection" labels: - {{- include "splunk-connect-for-snmp.traps.labels" . | nindent 4 }} + {{- include "splunk-connect-for-snmp.labels" . | nindent 4 }} annotations: "helm.sh/hook": test "kube-score/ignore": "pod-probes,pod-networkpolicy" diff --git a/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl index b83a4d0a3..45d79009b 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl @@ -30,18 +30,6 @@ Create chart name and version as used by the chart label. 
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* -Common labels -*/}} -{{- define "splunk-connect-for-snmp.traps.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.traps.chart" . }} -{{ include "splunk-connect-for-snmp.traps.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - {{/* Selector labels */}} @@ -51,12 +39,9 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* -Create the name of the service account to use +Common labels */}} -{{- define "splunk-connect-for-snmp.traps.serviceAccountName" -}} -{{- if .Values.traps.serviceAccount.create }} -{{- default (include "splunk-connect-for-snmp.traps.fullname" .) .Values.traps.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.traps.serviceAccount.name }} -{{- end }} -{{- end }} +{{- define "splunk-connect-for-snmp.traps.labels" -}} +{{ include "splunk-connect-for-snmp.traps.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . }} +{{- end }} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml index f10547dab..823d6faf9 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.traps.enable" .) "true" }} apiVersion: apps/v1 kind: Deployment metadata: @@ -24,13 +25,19 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.traps.serviceAccountName" . }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . 
}} securityContext: - {{- toYaml .Values.traps.podSecurityContext | nindent 8 }} + fsGroup: 10001 containers: - name: {{ .Chart.Name }}-traps securityContext: - {{- toYaml .Values.traps.securityContext | nindent 12 }} + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: @@ -159,4 +166,5 @@ spec: - name: pysnmp-cache-volume emptyDir: {} - name: tmp - emptyDir: {} \ No newline at end of file + emptyDir: {} +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/traps/deprecated_hpa.yaml b/charts/splunk-connect-for-snmp/templates/traps/deprecated_hpa.yaml new file mode 100644 index 000000000..668155826 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/traps/deprecated_hpa.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.traps.autoscaling.enabled (eq (include "splunk-connect-for-snmp.traps.enable" .) "true" ) ( eq ( toString .Values.useDeprecatedAPI ) "true" ) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.traps.fullname" . }} + labels: + {{- include "splunk-connect-for-snmp.traps.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.traps.fullname" . 
}} + minReplicas: {{ .Values.traps.autoscaling.minReplicas }} + maxReplicas: {{ .Values.traps.autoscaling.maxReplicas }} + metrics: + {{- if .Values.traps.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.traps.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.traps.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.traps.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/traps/hpa.yaml b/charts/splunk-connect-for-snmp/templates/traps/hpa.yaml index ef3fc26df..0be0314c2 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/hpa.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/hpa.yaml @@ -1,5 +1,5 @@ -{{- if .Values.traps.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 +{{- if and .Values.traps.autoscaling.enabled (eq (include "splunk-connect-for-snmp.traps.enable" .) "true" ) ( eq ( toString .Values.useDeprecatedAPI ) "false" ) }} +apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: {{ include "splunk-connect-for-snmp.traps.fullname" . 
}} @@ -17,12 +17,16 @@ spec: - type: Resource resource: name: cpu - targetAverageUtilization: {{ .Values.traps.autoscaling.targetCPUUtilizationPercentage }} + target: + type: Utilization + averageUtilization: {{ .Values.traps.autoscaling.targetCPUUtilizationPercentage }} {{- end }} {{- if .Values.traps.autoscaling.targetMemoryUtilizationPercentage }} - type: Resource resource: name: memory - targetAverageUtilization: {{ .Values.traps.autoscaling.targetMemoryUtilizationPercentage }} + target: + type: Utilization + averageUtilization: {{ .Values.traps.autoscaling.targetMemoryUtilizationPercentage }} {{- end }} {{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/traps/ingress.yaml b/charts/splunk-connect-for-snmp/templates/traps/ingress.yaml deleted file mode 100644 index 7d28dae6d..000000000 --- a/charts/splunk-connect-for-snmp/templates/traps/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.traps.ingress.enabled -}} -{{- $fullName := include "splunk-connect-for-snmp.traps.fullname" . -}} -{{- $svcPort := .Values.traps.service.port -}} -{{- if and .Values.traps.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.traps.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.traps.ingress.annotations "kubernetes.io/ingress.class" .Values.traps.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "splunk-connect-for-snmp.traps.labels" . | nindent 4 }} - {{- with .Values.traps.ingress.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - {{- if and .Values.traps.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.traps.ingress.className }} - {{- end }} - {{- if .Values.traps.ingress.tls }} - tls: - {{- range .Values.traps.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.traps.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/traps/networkpolicy.yaml b/charts/splunk-connect-for-snmp/templates/traps/networkpolicy.yaml index c1ae74d40..eae498d03 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/networkpolicy.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/networkpolicy.yaml @@ -1,4 +1,4 @@ -{{- if .Values.traps.networkPolicy -}} +{{- if and .Values.traps.networkPolicy ( eq (include "splunk-connect-for-snmp.traps.enable" .) "true" ) -}} apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: diff --git a/charts/splunk-connect-for-snmp/templates/traps/pdb.yaml b/charts/splunk-connect-for-snmp/templates/traps/pdb.yaml index 10390b3ce..e9e550238 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/pdb.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/pdb.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.traps.enable" .) 
"true" }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: @@ -9,3 +10,4 @@ spec: selector: matchLabels: {{- include "splunk-connect-for-snmp.traps.selectorLabels" . | nindent 8 }} +{{- end -}} diff --git a/charts/splunk-connect-for-snmp/templates/traps/service.yaml b/charts/splunk-connect-for-snmp/templates/traps/service.yaml index b4218c6b2..b51fd48d5 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/service.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/service.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.traps.enable" .) "true" }} apiVersion: v1 kind: Service metadata: @@ -31,3 +32,4 @@ spec: name: snmp-udp selector: {{- include "splunk-connect-for-snmp.traps.selectorLabels" . | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml b/charts/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml deleted file mode 100644 index eb6b9a627..000000000 --- a/charts/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.traps.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "splunk-connect-for-snmp.traps.serviceAccountName" . }} - labels: - {{- include "splunk-connect-for-snmp.traps.labels" . | nindent 4 }} - {{- with .Values.traps.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl index e856b5675..fae1c9a9d 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl @@ -30,22 +30,10 @@ Create chart name and version as used by the chart label. 
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* -Common labels -*/}} -{{- define "splunk-connect-for-snmp.worker.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . }} -{{ include "splunk-connect-for-snmp.worker.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} {{/* Selector labels */}} - {{- define "splunk-connect-for-snmp.worker.selectorLabels" -}} app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} @@ -66,41 +54,28 @@ app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }}-tr app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} -{{- define "splunk-connect-for-snmp.worker.trap.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . }} -{{ include "splunk-connect-for-snmp.worker.trap.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} +{{/* +Common labels +*/}} {{- define "splunk-connect-for-snmp.worker.poller.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . }} {{ include "splunk-connect-for-snmp.worker.poller.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{ include "splunk-connect-for-snmp.labels" . }} {{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} + +{{- define "splunk-connect-for-snmp.worker.trap.labels" -}} +{{ include "splunk-connect-for-snmp.worker.trap.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . }} {{- end }} {{- define "splunk-connect-for-snmp.worker.sender.labels" -}} -helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . 
}} {{ include "splunk-connect-for-snmp.worker.sender.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} -{{/* -Create the name of the service account to use -*/}} -{{- define "splunk-connect-for-snmp.worker.serviceAccountName" -}} -{{- if .Values.worker.serviceAccount.create }} -{{- default (include "splunk-connect-for-snmp.worker.fullname" .) .Values.worker.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.worker.serviceAccount.name }} +{{ include "splunk-connect-for-snmp.labels" . }} {{- end }} + +{{- define "splunk-connect-for-snmp.worker.labels" -}} +{{ include "splunk-connect-for-snmp.worker.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . }} {{- end }} {{- define "environmental-variables" -}} @@ -118,6 +93,8 @@ Create the name of the service account to use value: {{ .Values.worker.walkRetryMaxInterval | default "600" | quote }} - name: METRICS_INDEXING_ENABLED value: {{ (.Values.poller).metricsIndexingEnabled | default "false" | quote }} +- name: POLL_BASE_PROFILES + value: {{ (.Values.poller).pollBaseProfiles | quote }} {{- if .Values.worker.ignoreNotIncreasingOid }} - name: IGNORE_NOT_INCREASING_OIDS value: {{ join "," .Values.worker.ignoreNotIncreasingOid }} @@ -130,6 +107,8 @@ Create the name of the service account to use value: {{ .Values.worker.logLevel | default "INFO" }} - name: UDP_CONNECTION_TIMEOUT value: {{ .Values.worker.udpConnectionTimeout | default "3" | quote }} +- name: MAX_OID_TO_PROCESS + value: {{ .Values.poller.maxOidToProcess | default "70" | quote }} - name: PROFILES_RELOAD_DELAY value: {{ .Values.worker.profilesReloadDelay | default "60" | quote }} - name: MIB_SOURCES diff --git a/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index f1bdb083b..caf79db40 100644 --- 
a/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.polling.enable" .) "true" }} apiVersion: apps/v1 kind: Deployment metadata: @@ -24,13 +25,19 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . }} securityContext: - {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + fsGroup: 10001 containers: - name: {{ .Chart.Name }}-worker-poller securityContext: - {{- toYaml .Values.worker.securityContext | nindent 12 }} + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: @@ -102,4 +109,5 @@ spec: - name: pysnmp-cache-volume emptyDir: {} - name: tmp - emptyDir: {} \ No newline at end of file + emptyDir: {} + {{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/poller/deprecated_hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/poller/deprecated_hpa.yaml new file mode 100644 index 000000000..c387a16d6 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/poller/deprecated_hpa.yaml @@ -0,0 +1,22 @@ +{{- if and ( eq (include "splunk-connect-for-snmp.polling.enable" .) "true" ) (eq ( toString .Values.worker.poller.autoscaling.enabled) "true") ( eq ( toString .Values.useDeprecatedAPI ) "true" ) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-poller + labels: + {{- include "splunk-connect-for-snmp.worker.poller.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-poller + minReplicas: {{ .Values.worker.poller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.poller.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.poller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.worker.poller.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml index 0a130bf20..e6e9edb64 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.poller.autoscaling.enabled }} +{{- if and ( eq (include "splunk-connect-for-snmp.polling.enable" .) "true" ) (eq ( toString .Values.worker.poller.autoscaling.enabled) "true") ( eq ( toString .Values.useDeprecatedAPI ) "false" ) }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: diff --git a/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 1a54b7211..79b9bac1c 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -24,13 +24,19 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . 
}} securityContext: - {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + fsGroup: 10001 containers: - name: {{ .Chart.Name }}-worker-sender securityContext: - {{- toYaml .Values.worker.securityContext | nindent 12 }} + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: diff --git a/charts/splunk-connect-for-snmp/templates/worker/sender/deprecated_hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/sender/deprecated_hpa.yaml new file mode 100644 index 000000000..5887ea6b4 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/sender/deprecated_hpa.yaml @@ -0,0 +1,22 @@ +{{- if and ( eq ( toString .Values.worker.sender.autoscaling.enabled) "true") ( eq ( toString .Values.useDeprecatedAPI ) "true" ) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-sender + labels: + {{- include "splunk-connect-for-snmp.worker.sender.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . 
}}-sender + minReplicas: {{ .Values.worker.sender.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.sender.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.sender.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.worker.sender.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml index 91230c417..7339df704 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.sender.autoscaling.enabled }} +{{- if and ( eq ( toString .Values.worker.sender.autoscaling.enabled) "true") ( eq ( toString .Values.useDeprecatedAPI ) "false" ) }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: diff --git a/charts/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml b/charts/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml deleted file mode 100644 index 205b8e9ed..000000000 --- a/charts/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.worker.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} - labels: - {{- include "splunk-connect-for-snmp.worker.labels" . | nindent 4 }} - {{- with .Values.worker.serviceAccount.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index fc82e0379..1477addad 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -1,3 +1,4 @@ +{{- if eq (include "splunk-connect-for-snmp.traps.enable" .) "true" }} apiVersion: apps/v1 kind: Deployment metadata: @@ -24,13 +25,19 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . }} securityContext: - {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + fsGroup: 10001 containers: - name: {{ .Chart.Name }}-worker-trap securityContext: - {{- toYaml .Values.worker.securityContext | nindent 12 }} + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: @@ -92,4 +99,5 @@ spec: - name: pysnmp-cache-volume emptyDir: {} - name: tmp - emptyDir: {} \ No newline at end of file + emptyDir: {} +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/trap/deprecated_hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/trap/deprecated_hpa.yaml new file mode 100644 index 000000000..93f4cd106 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/trap/deprecated_hpa.yaml @@ -0,0 +1,22 @@ +{{- if and ( eq (include "splunk-connect-for-snmp.traps.enable" .) 
"true" ) ( eq ( toString .Values.worker.trap.autoscaling.enabled) "true") ( eq ( toString .Values.useDeprecatedAPI ) "true" ) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-trap + labels: + {{- include "splunk-connect-for-snmp.worker.trap.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-trap + minReplicas: {{ .Values.worker.trap.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.trap.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.trap.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.worker.trap.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml index ae225915a..2ddb3d5e8 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.trap.autoscaling.enabled }} +{{- if and ( eq (include "splunk-connect-for-snmp.traps.enable" .) "true" ) ( eq ( toString .Values.worker.trap.autoscaling.enabled) "true") ( eq (toString .Values.useDeprecatedAPI) "false" ) }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml index f17f824e8..64ec2d7d8 100644 --- a/charts/splunk-connect-for-snmp/values.yaml +++ b/charts/splunk-connect-for-snmp/values.yaml @@ -2,36 +2,58 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
-replicaCount: 1 +################################################################################ +# SC4SNMP image settings +################################################################################ image: + # The registry and name of the SC4SNMP image to pull repository: ghcr.io/splunk/splunk-connect-for-snmp/container + # The policy that specifies when the user wants the SC4SNMP images to be pulled pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: "" +# Secrets to attach to the respective serviceaccount to pull docker images imagePullSecrets: [] +################################################################################ +# Splunk Cloud / Splunk Enterprise configuration. +################################################################################ + splunk: + # Enables sending data to Splunk enabled: true + # the protocol of the HEC endpoint: https or http + protocol: "" + # the port of the HEC endpoint + port: "8088" + # IP address or a domain name of a Splunk instance to send data to. + host: "" + # the protocol, host and port given here makes up to a HEC endpoint + # according to the pattern: {{protocol}}://{{host}}:{{port}}/services/collector + # for ex. https://splunk-endpoint:8088/services/collector + + # Required for Splunk Enterprise/Cloud (if `enabled` is set to true). Splunk + # HTTP Event Collector token. + token: 00000000-0000-0000-0000-000000000000 + # Whether to skip checking the certificate of the HEC endpoint when sending + # data over HTTPS. + insecureSSL: "true" + +################################################################################ +# Splunk Observability configuration +################################################################################ -inventory: - nameOverride: "" - fullnameOverride: "" - - podAnnotations: {} - - podSecurityContext: - fsGroup: 10001 +sim: + # Enables sending data to Splunk Observability/SignalFx. 
+ enabled: false - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 10001 - runAsGroup: 10001 + # Splunk Observability realm to send telemetry data to. + signalfxToken: "" + # Required for Splunk Observability (if `realm` is specified). Splunk + # Observability org access token. + signalfxRealm: "" resources: {} # limits: @@ -41,37 +63,90 @@ inventory: # cpu: 200m # memory: 256Mi - nodeSelector: {} - - tolerations: [] -scheduler: - nameOverride: "" - fullnameOverride: "" + service: + # Here you can define annotations to append under sim service + annotations: {} - serviceAccount: - # Specifies whether a service account should be created + secret: + # Option for creating a new secret or using an existing one. + # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the + # values from sim.signalfxToken and sim.signalfxRealm. + # When secret.create=false, the user must set secret.name to a name of a k8s secret the user created. create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template name: "" - podAnnotations: {} - - podSecurityContext: - fsGroup: 10001 + replicaCount: 1 + autoscaling: + enabled: false - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 10001 - runAsGroup: 10001 +################################################################################ +# SC4SNMP components settings +################################################################################ +scheduler: + ### Group definitions ### + # Create the group definition in case you want to configure polling from multiple hosts + # at once, more on this: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/configuring-groups/ + + #groups: | + # example_group_1: + # - address: 10.202.4.202 + # port: 161 + # - address: 63.2.40.0 + # port: 161 + + + ### Profiles definitions ### + # Create a profile definition to set varbinds you want to poll from the device. 
+ # more on this: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/configuring-profiles/ + + #profiles: | + # smart_profile: + # frequency: 100 + # condition: + # type: field + # field: "SNMPv2-MIB.sysDescr" + # patterns: + # - '.*linux.*' + # varBinds: + # - ['SNMPv2-MIB'] + # - ['SNMPv2-MIB', 'sysName'] + # - ['SNMPv2-MIB', 'sysUpTime',0] + # static_profile: + # frequency: 300 + # varBinds: + # - ['IP-MIB'] + # small_walk: + # condition: + # type: "walk" + # varBinds: + # - ['IF-MIB', 'ifDescr'] + # - ['IF-MIB', 'ifAdminStatus'] + # - ['IF-MIB', 'ifOperStatus'] + # - ['IF-MIB', 'ifName'] + # - ['IF-MIB', 'ifAlias'] + # - ['IF-MIB', 'ifIndex'] + # conditional_profile: + # frequency: 30 + # conditions: + # - field: IF-MIB.ifAdminStatus + # operation: "equals" + # value: "up" + # - field: IF-MIB.ifOperStatus + # operation: "equals" + # value: "up" + # varBinds: + # - ['IF-MIB', 'ifDescr'] + # - ['IF-MIB', 'ifAlias'] + # - ['IF-MIB', 'ifInErrors'] + # - ['IF-MIB', 'ifOutDiscards'] + + # mapping MIB fields to custom names + # more: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/configuring-profiles/#custom-translations + customTranslations: {} + + # set CPU and Memory limits for a scheduler pod resources: {} # limits: # cpu: 800m @@ -79,227 +154,180 @@ scheduler: # requests: # cpu: 500m # memory: 256Mi + # logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL + logLevel: "INFO" + podAnnotations: {} + podAntiAffinity: soft + nodeSelector: {} - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 +poller: + # Appending OID indexes to metrics. 
+ # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/#append-oid-index-part-to-the-metrics + metricsIndexingEnabled: false - nodeSelector: {} + # Enable polling base profiles (with IF-MIB and SNMPv2-MIB) from + # https://github:com/splunk/splunk-connect-for-snmp/blob/main/splunk_connect_for_snmp/profiles/base.yaml + pollBaseProfiles: true - tolerations: [] + # Sometimes SNMP Agent cannot accept more than X OIDs per once, so if the error "TooBig" is visible in logs, + # decrease the number + maxOidToProcess: 70 - podAntiAffinity: soft + # list of kubernetes secrets name that will be used for polling + # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/#define-usernamesecrets + usernameSecrets: [] + + # Here is where polling happens. Learn more on how to configure it here: + # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/ + + #inventory: | + # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + # example_group_1,,2c,public,,,3000,static_profile,t, worker: - taskTimeout: 2400 - walkRetryMaxInterval: 600 - ignoreNotIncreasingOid: [] + # workers are responsible for the actual execution of polling, processing trap messages, and sending data to Splunk. 
+ # More: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/worker-configuration/ + + # The poller worker consumes all the tasks related to polling poller: + # number of the poller replicas when autoscaling is set to false replicaCount: 2 + # minimum number of threads in a pod concurrency: 4 + # how many tasks are consumed from the queue at once prefetch: 1 autoscaling: + # enabling autoscaling for poller worker pods enabled: false + # minimum number of running poller worker pods when autoscaling is enabled minReplicas: 2 + # maximum number of running poller worker pods when autoscaling is enabled maxReplicas: 10 + # CPU % threshold that must be exceeded on poller worker pods to spawn another replica targetCPUUtilizationPercentage: 80 + resources: + # the resources limits for poller worker container limits: cpu: 500m + # the resources requests for poller worker container requests: cpu: 250m + + # The trap worker consumes all the trap related tasks produced by the trap pod trap: + # number of the trap replicas when autoscaling is set to false replicaCount: 2 + # minimum number of threads in a pod concurrency: 4 + # how many tasks are consumed from the queue at once prefetch: 30 autoscaling: + # enabling autoscaling for trap worker pods enabled: false + # minimum number of running poller trap pods when autoscaling is enabled minReplicas: 2 + # maximum number of running poller trap pods when autoscaling is enabled maxReplicas: 10 + # CPU % threshold that must be exceeded on traps worker pods to spawn another replica targetCPUUtilizationPercentage: 80 resources: + # the resources limits for poller worker container limits: cpu: 500m requests: + # the resources requests for poller worker container cpu: 250m + + # The sender worker handles sending data to Splunk sender: + # number of the poller replicas when autoscaling is set to false replicaCount: 1 + # minimum number of threads in a pod concurrency: 4 + # how many tasks are consumed from the queue at 
once prefetch: 30 autoscaling: + # enabling autoscaling for sender worker pods enabled: false + # minimum number of running sender trap pods when autoscaling is enabled minReplicas: 2 + # maximum number of running sender trap pods when autoscaling is enabled maxReplicas: 10 + # CPU % threshold that must be exceeded on sender worker pods to spawn another replica targetCPUUtilizationPercentage: 80 resources: + # the resources limits for poller worker container limits: cpu: 500m + # the resources requests for poller worker container requests: cpu: 250m - nameOverride: "" - fullnameOverride: "" + # task timeout in seconds (usually necessary when walk process takes a long time) + taskTimeout: 2400 + # maximum time interval between walk attempts + walkRetryMaxInterval: 60 + # ignoring `occurred: OID not increasing` issues for hosts specified in the array, ex: + # ignoreNotIncreasingOid: + # - "127.0.0.1:164" + # - "127.0.0.6" + ignoreNotIncreasingOid: [] + # logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL + logLevel: "INFO" + podAntiAffinity: soft - serviceAccount: - # Specifies whether a service account should be created +inventory: + secret: + # Option for creating a new secret or using an existing one. + # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the + # values from sim.signalfxToken and sim.signalfxRealm. + # When secret.create=false, the user must set secret.name to a name of a k8s secret the user created. create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template name: "" - podAnnotations: {} - - podSecurityContext: - fsGroup: 10001 - - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 10001 - runAsGroup: 10001 - - resources: - limits: - cpu: 500m - memory: 5128Mi - requests: - cpu: 300m - memory: 256Mi - - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - - nodeSelector: {} - - tolerations: [] - - podAntiAffinity: soft - -poller: - metricsIndexingEnabled: false - usernameSecrets: [] - -sim: - enabled: false - nameOverride: "" - fullnameOverride: "" - - podAnnotations: {} - - autoscaling: - enabled: false - - podSecurityContext: - fsGroup: 10001 - ingress: - enabled: false - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 10001 - runAsGroup: 10001 + service: + annotations: { } + # set CPU and Memory limits for an inventory pod resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 200m - # memory: 256Mi + # limits: + # cpu: 800m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 256Mi nodeSelector: {} - tolerations: [] - serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: { } - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - service: - annotations: {} - - secret: - # Option for creating a new secret or using an existing one. - # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the - # values from sim.signalfxToken and sim.signalfxRealm. 
- # When secret.create=false, the user must set secret.name to a name of a k8s secret the user created. - create: true - name: "" traps: + # this is a simple server that can handle SNMP traps sent by SNMP devices like routers or switches. + + # number of the traps receivers replicas when autoscaling is set to false + # it makes sense to increase it in case there are hundreds of traps per seconds replicaCount: 2 + # usernameSecrets section define SNMPv3 secrets for trap messages sent by SNMP device usernameSecrets: [] + # SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending application for the USM users table + # of the TRAP receiving application for each USM user securityEngineId: - "80003a8c04" + # communities define a version of SNMP protocol and SNMP community string, which should be used communities: {} - nameOverride: "" - fullnameOverride: "" - - serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "" - - podAnnotations: {} - - podSecurityContext: - fsGroup: 10001 - - securityContext: - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 10001 - runAsGroup: 10001 service: annotations: {} + # this settings set metallb.universe.tf/allow-shared-ip annotation in trap service + # was introduced to allow using splunk-connect-for-syslog on the same machine usemetallb: true metallbsharingkey: "splunk-connect" + # when using SC4SNMP on a standalone k8s installation, LoadBalancer is a good choice + # on a multi-node it's better to set this as NodePort and configure traps.service.nodePort type: LoadBalancer port: 162 - - ingress: - enabled: false - className: "" - annotations: - {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local + # nodePort will be set only when type of service is a NodePort + #nodePort: 30000 resources: {} # limits: @@ -316,12 +344,25 @@ traps: targetCPUUtilizationPercentage: 80 # targetMemoryUtilizationPercentage: 80 + # logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL + logLevel: "INFO" nodeSelector: {} - tolerations: [] - podAntiAffinity: soft + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +# This parameter allows to use SC4SNMP for older version of Kubernetes that doesn't support autoscaling/v2 +useDeprecatedAPI: false + ############################################################################# ### Please do not modify below values, unless you know what you're doing! ### ############################################################################# diff --git a/docs/configuration/configuring-profiles.md b/docs/configuration/configuring-profiles.md index fa9913300..cda3acdc0 100644 --- a/docs/configuration/configuring-profiles.md +++ b/docs/configuration/configuring-profiles.md @@ -3,7 +3,7 @@ Profiles are the units where you can configure what you want to poll, and then assign them to the device. The definition of profile can be found in the `values.yaml` file under the `scheduler` section. -Here are the instructions on how to use profiles: [Update Inventory and Profile](../deployment-configuration/#update-inventory-and-profile). +Here are the instructions on how to use profiles: [Update Inventory and Profile](../poller-configuration/#update-inventory). There are two types of profiles in general: @@ -159,9 +159,8 @@ poller: 10.202.4.202,,2c,public,,,2000,small_walk,, ``` -NOTE: When small walk is configured, you can set up polling only of OIDs belonging to the walk profile varBinds. -Additionally, there are two MIB families that are enabled by default (we need them to create the state of the device in the database and poll base profiles): `IF-MIB` and `SNMPv2-MIB`. -For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, `IF-MIB`, and `SNMPv2-MIB` OIDs. +NOTE: When small walk is configured, `SNMPv2-MIB` is enabled by default (we need it to create the state of the device in the database). 
+For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, and `SNMPv2-MIB` OIDs. ## SmartProfile configuration @@ -215,6 +214,63 @@ scheduler: NOTE: Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. In case you changed frequency, or a profile type, the change will be reflected only after the next walk. There is also 5 minute TTL for an inventory pod. Basically, SC4SNMP allows one inventory upgrade and then block updates for the next 5 minutes. +## Conditional profiles +There is a way to not explicitly give what SNMP objects we want to poll - only the conditions that must be fulfilled to +qualify object for polling. + +An example of a conditional profile is: + +```yaml +IF_conditional_profile: + frequency: 30 + conditions: + - field: IF-MIB.ifAdminStatus + operation: "equals" + value: "up" + - field: IF-MIB.ifOperStatus + operation: "equals" + value: "up" + varBinds: + - [ 'IF-MIB', 'ifDescr' ] + - [ 'IF-MIB', 'ifAlias' ] + - [ 'IF-MIB', 'ifInErrors' ] + - [ 'IF-MIB', 'ifOutDiscards' ] +``` + +When the such profile is defined and added to a device in an inventory, it will poll all interfaces where `ifAdminStatus` +and `ifOperStatus` is up. Note that conditional profiles are being evaluated during the walk process (on every `walk_interval`) +and if the status changes in between, the scope of the conditional profile won't be modified. + +These are operations possible to use in conditional profiles: + +1. `equals` - value gathered from `field` is equal to `value` +2. `gt` - value gathered from `field` is bigger than `value` (works only for numeric values) +3. `lt` - value gathered from `field` is smaller than `value` (works only for numeric values) +4. 
`in` - value gathered from `field` is equal to one of the elements provided in `value`, for ex.: + +```yaml +conditions: + - field: IF-MIB.ifAdminStatus + operation: "in" + value: + - "down" + - 0 +``` + +`field` part of `conditions` must fulfill the pattern `MIB-family.field`. Fields must represent textual value (not metric one), +you can learn more about it [here](snmp-data-format.md). + +You have to explicitly define `varBinds` (not only the MIB family but also the field to poll), so such config: + +```yaml +varBinds: +- [ 'IF-MIB' ] +``` + +is not correct. + + + ## Custom translations If the user wants to use custom names/translations of MIB names, it can be configured under the customTranslations section under scheduler config. Translations are grouped by MIB family. In the example below IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1: diff --git a/docs/configuration/poller-configuration.md b/docs/configuration/poller-configuration.md index 8608c241f..e58d7c0ab 100644 --- a/docs/configuration/poller-configuration.md +++ b/docs/configuration/poller-configuration.md @@ -66,8 +66,28 @@ out of this object: } ``` +Not every SNMP metric object is structured the way it has its index as a one of the field value. +We can append the index part of OID with: + +```yaml +poller: + metricsIndexingEnabled: true +``` + + +### Disable automatic polling of base profiles + +There are [two profiles](https://github:com/splunk/splunk-connect-for-snmp/blob/main/splunk_connect_for_snmp/profiles/base.yaml) that are being polled by default - so that even without any configuration you can see +the data in Splunk. You can disable it with `pollBaseProfiles` parameter. + +```yaml +poller: + pollBaseProfiles: false +``` + + ### Configure inventory -To update inventory, see: [Update Inventory and Profile](#update-inventory-and-profile). +To update inventory, see: [Update Inventory and Profile](#update-inventory). 
`inventory` section in `poller` has the following fields to configure: diff --git a/docs/configuration/step-by-step-poll.md b/docs/configuration/step-by-step-poll.md index e27062d54..5bca728b2 100644 --- a/docs/configuration/step-by-step-poll.md +++ b/docs/configuration/step-by-step-poll.md @@ -1,4 +1,4 @@ -# Example polling scenario +# An example of a polling scenario We have 4 hosts we want to poll from: @@ -7,20 +7,38 @@ We have 4 hosts we want to poll from: 3. `10.202.4.203:161` 4. `10.202.4.204:163` -Let's say that we're interested mostly in information about interfaces and some CPU related data. For this purposes, -we need to configure the `IF-MIB` family for interfaces, and `UCD-SNMP-MIB` for the CPU. - -We'll do two things under the `scheduler` section: define the group from which we want to poll, and the profile of what exactly will be polled: +To retrieve data from the device efficiently, first determine the specific data needed. Instead of walking through +the entire `1.3.6.1`, limit the walk to poll only the necessary data. Configure the `IF-MIB` family for interfaces and +the `UCD-SNMP-MIB` for CPU-related statistics. 
In the `scheduler` section of `values.yaml`, define the target group and +establish the polling parameters, known as the profile, to gather the desired data precisely: ```yaml scheduler: logLevel: "INFO" profiles: | + small_walk: + condition: + type: "walk" + varBinds: + - ["IF-MIB"] + - ["UCD-SNMP-MIB"] switch_profile: frequency: 60 varBinds: - - ['IF-MIB'] - - ['UCD-SNMP-MIB'] + - ["IF-MIB", "ifDescr"] + - ["IF-MIB", "ifAdminStatus"] + - ["IF-MIB", "ifOperStatus"] + - ["IF-MIB", "ifName"] + - ["IF-MIB", "ifAlias"] + - ["IF-MIB", "ifIndex"] + - ["IF-MIB", "ifInDiscards"] + - ["IF-MIB", "ifInErrors"] + - ["IF-MIB", "ifInOctets"] + - ["IF-MIB", "ifOutDiscards"] + - ["IF-MIB", "ifOutErrors"] + - ["IF-MIB", "ifOutOctets"] + - ["IF-MIB", "ifOutQLen"] + - ["UCD-SNMP-MIB"] groups: | switch_group: - address: 10.202.4.201 @@ -30,7 +48,7 @@ scheduler: port: 163 ``` -Then we need to pass the proper instruction of what to do for SC4SNMP instance. This can be done by appending a new row +Then it is required to pass the proper instruction of what to do for SC4SNMP instance. This can be done by appending a new row to `poller.inventory`: ```yaml @@ -38,9 +56,46 @@ poller: logLevel: "WARN" inventory: | address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete - switch_group,,2c,public,,,2000,switch_profile,, + switch_group,,2c,public,,,2000,small_walk;switch_profile,, ``` +The provided configuration will make: + +1. Walk devices from `switch_group` with `IF-MIB` and `UCD-SNMP-MIB` every 2000 seconds +2. Poll specific `IF-MIB` fields and the whole `UCD-SNMP-MIB` every 60 seconds + +Note: you could as well limit walk profile even more if you want to enhance the performance. 
+ +It makes sense to put in the walk the textual values that don't required to be constantly monitored, and monitor only the metrics +you're interested in: + +``` +small_walk: + condition: + type: "walk" + varBinds: + - ["IF-MIB", "ifDescr"] + - ["IF-MIB", "ifAdminStatus"] + - ["IF-MIB", "ifOperStatus"] + - ["IF-MIB", "ifName"] + - ["IF-MIB", "ifAlias"] + - ["IF-MIB", "ifIndex"] +switch_profile: + frequency: 60 + varBinds: + - ["IF-MIB", "ifInDiscards"] + - ["IF-MIB", "ifInErrors"] + - ["IF-MIB", "ifInOctets"] + - ["IF-MIB", "ifOutDiscards"] + - ["IF-MIB", "ifOutErrors"] + - ["IF-MIB", "ifOutOctets"] + - ["IF-MIB", "ifOutQLen"] +``` + +Then every metric object will be enriched with the textual values gathered from a walk process. Learn more about +SNMP format [here](snmp-data-format.md). + + Now we're ready to reload SC4SNMP. We run the `helm3 upgrade` command: ```yaml @@ -92,7 +147,7 @@ Successfully connected to http://snmp-mibserver/index.csv {"message": "New Record address='10.202.4.204' port=163 version='2c' community='public' secret=None security_engine=None walk_interval=2000 profiles=['switch_profile'] smart_profiles=True delete=False", "time": "2022-09-05T14:30:30.607641", "level": "INFO"} ``` -In some time (depending of how long the walk takes), we'll see events under: +In some time (depending on how long the walk takes), we'll see events under: ```yaml | mpreview index=netmetrics | search profiles=switch_profile diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/gettingstarted/sc4snmp-installation.md index e17d98c69..31b8a5623 100644 --- a/docs/gettingstarted/sc4snmp-installation.md +++ b/docs/gettingstarted/sc4snmp-installation.md @@ -5,11 +5,17 @@ for single node non-HA deployments. It does not have resource requests and limit See the mongo, redis, scheduler, worker, and traps configuration sections for guidance on production configuration. 
+## Installation process + + ### Offline installation For offline installation instructions see [this page](../offlineinstallation/offline-sc4snmp.md). -### Add SC4SNMP repository + +### Online installation + +#### Add SC4SNMP repository ``` microk8s helm3 repo add splunk-connect-for-snmp https://splunk.github.io/splunk-connect-for-snmp microk8s helm3 repo update @@ -19,135 +25,27 @@ Now the package should be visible in `helm3` search command result: microk8s helm3 search repo snmp ``` Example output: -``` +``` NAME CHART VERSION APP VERSION DESCRIPTION splunk-connect-for-snmp/splunk-connect-for-snmp 1.0.0 1.0.0 A Helm chart for SNMP Connect for SNMP ``` -### Download and modify values.yaml -```yaml -splunk: - enabled: true - protocol: https - host: ###SPLUNK_HOST### - token: ###SPLUNK_TOKEN### - insecureSSL: "false" - port: "###SPLUNK_PORT###" -image: - pullPolicy: "Always" -traps: - communities: - 2c: - - public - - homelab - #usernameSecrets: - # - sc4snmp-hlab-sha-aes - # - sc4snmp-hlab-sha-des - - #loadBalancerIP: The IP address in the metallb pool - loadBalancerIP: ###X.X.X.X### -worker: - # There are 3 types of workers - trap: - # replicaCount: number of trap-worker pods which consumes trap tasks - replicaCount: 2 - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 10 - # targetCPUUtilizationPercentage: 80 - poller: - # replicaCount: number of poller-worker pods which consumes polling tasks - replicaCount: 2 - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 10 - # targetCPUUtilizationPercentage: 80 - sender: - # replicaCount: number of sender-worker pods which consumes sending tasks - replicaCount: 1 - # autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # 
maxReplicas: 10 - # targetCPUUtilizationPercentage: 80 - # udpConnectionTimeout: timeout in seconds for SNMP operations - #udpConnectionTimeout: 5 - logLevel: "INFO" -scheduler: - logLevel: "INFO" -# profiles: | -# generic_switch: -# frequency: 300 -# varBinds: -# - ['SNMPv2-MIB', 'sysDescr'] -# - ['SNMPv2-MIB', 'sysName', 0] -# - ['TCP-MIB', 'tcpActiveOpens'] -# - ['TCP-MIB', 'tcpAttemptFails'] -# - ['IF-MIB'] -#poller: - # usernameSecrets: - # - sc4snmp-hlab-sha-aes - # - sc4snmp-hlab-sha-des - # inventory: | - # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete - # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,,, - # 10.0.0.199,,2c,public,,,3000,,,True - # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, -sim: - # sim must be enabled if you want to use signalFx - enabled: false -# signalfxToken: BCwaJ_Ands4Xh7Nrg -# signalfxRealm: us0 -mongodb: - pdb: - create: true - persistence: - storageClass: "microk8s-hostpath" - volumePermissions: - enabled: true -``` - -`values.yaml` is used during the installation process for configuring Kubernetes values. - -### Configure Splunk Enterprise or Splunk Cloud Connection -Splunk Enterprise or Splunk Cloud Connection is enabled by default. To disable Splunk Enterprise or Splunk Cloud `splunk.enabled` property, set it to `false`. 
-Additionally, the connection parameters for Splunk Enterprise or Splunk Cloud need to be set in the `splunk` section: - -| Placeholder | Description | Example | -|---|---|---| -| ###SPLUNK_HOST### | host address of splunk instance | "i-08c221389a3b9899a.ec2.splunkit.io" | -| ###SPLUNK_PORT### | port number of splunk instance | "8088" | -| ###SPLUNK_TOKEN### | Splunk HTTP Event Collector token | 450a69af-16a9-4f87-9628-c26f04ad3785 | -| ###X.X.X.X### | SHARED IP address used for SNMP Traps | 10.202.18.166 | - -Other optional variables can be configured: - -| variable | description | default | -| --- | --- | --- | -| splunk.protocol | port of splunk instance| https | -| splunk.insecure_ssl| is insecure ssl allowed | "true" | -| splunk.eventIndex | name of the events index | "netops" | -| splunk.metricsIndex | name of the metrics index | "netmetrics" | - - -### Configure Splunk Infrastructure Monitoring Connection -Splunk Infrastructure Monitoring is disabled by default. To enable the Splunk Infrastructure Monitoring -`sim.enabled` property, set it to `true`. -Additionally, connection parameters for Splunk Infrastructure Monitoring need to be set in the `sim` section: - -| variable | description | default | -| --- | --- | --- | -|signalfxToken | SIM token which can be use for ingesting date vi API | not set| -|signalfxRealm | Real of SIM | not set | - -For more details please check [SIM Configuration](../configuration/sim-configuration.md) - -### Install SC4SNMP +#### Download and modify values.yaml + +The installation of SC4SNMP requires the creation of a `values.yaml` file, which serves as the configuration file. To configure this file, follow these steps: + +1. Start with checking out the [basic configuration template][basic_template_link] +2. Review the [examples][examples_link] to determine which areas require configuration. +3. 
For more advanced configuration options, refer to the complete default [values.yaml](https://github.com/splunk/splunk-connect-for-snmp/blob/main/charts/splunk-connect-for-snmp/values.yaml) +or download it directly from Helm using the command `microk8s helm3 show values splunk-connect-for-snmp/splunk-connect-for-snmp` +4. In order to learn more about each of the config parts, check [configuration](../configuration/deployment-configuration.md) section. + +It is recommended to start by completing the base template and gradually add additional configurations as needed. + +#### Install SC4SNMP + +After the `values.yaml` creation, you can proceed with the SC4SNMP installation: + ``` bash microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` @@ -160,12 +58,16 @@ Use the following command to propagate configuration changes: microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` -### Verify deployment +## Verification of the deployment + In a few minutes, all pods should be up and running. It can be verified with: + ``` bash microk8s kubectl get pods -n sc4snmp ``` + Example output: + ``` NAME READY STATUS RESTARTS AGE snmp-splunk-connect-for-snmp-scheduler-7ddbc8d75-bljsj 1/1 Running 0 133m @@ -179,45 +81,64 @@ snmp-splunk-connect-for-snmp-trap-78759bfc8b-79m6d 1/1 Running snmp-splunk-connect-for-snmp-inventory-mjccw 0/1 Completed 0 6s ``` -### Test SNMP Traps -- Test the Trap by logging into Splunk and confirming the presence of events - in snmp `netops` index. +The output may vary depending on the configuration. In the above example, both polling and traps are configured, +and the data is being sent to Splunk. -- Test the trap from a Linux system with SNMP installed. Replace the IP address - `10.0.101.22` with the shared IP address above. 
+If you have `traps` configured, you should see `EXTERNAL-IP` in `snmp-splunk-connect-for-snmp-trap` service. +Check it using the command: -``` bash -apt update -apt-get install snmpd -snmptrap -v2c -c public 10.0.101.22 123 1.3.6.1.2.1.1.4 1.3.6.1.2.1.1.4 s test +```bash +microk8s kubectl get svc -n sc4snmp ``` -- Search Splunk: You should see one event per trap command with the host value of the - test machine IP address. +Here is an example of the correct setup: -``` bash -index="netops" sourcetype="sc4snmp:traps" +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +snmp-redis-headless ClusterIP None 6379/TCP 33h +snmp-mongodb ClusterIP 10.152.183.147 27017/TCP 33h +snmp-mibserver ClusterIP 10.152.183.253 80/TCP 33h +snmp-redis-master ClusterIP 10.152.183.135 6379/TCP 33h +snmp-mongodb-metrics ClusterIP 10.152.183.217 9216/TCP 33h +snmp-splunk-connect-for-snmp-trap LoadBalancer 10.152.183.33 10.202.9.21 162:30161/UDP 33h ``` -### Test SNMP Poller -- Test the Poller by logging into Splunk and confirming the presence of events - in snmp `netops` and metrics in `netmetrics` index. +If there's `` communicate instead of the IP address, that means you either provided the wrong IP address +in `traps.loadBalancerIP` or there's something wrong with the `metallb` microk8s addon. + +For the sake of the example, let's assume we haven't changed the default indexes names and the metric data goes to `netmetrics` +and the events goes to `netops`. + +#### Test SNMP Traps + +1. Simulate the event. On a Linux system, you can download `snmpd` package for its purpose and run: -- Test the trap from a Linux system install snmpd. - ``` bash apt update apt-get install snmpd +snmptrap -v2c -c public EXTERNAL-IP 123 1.3.6.1.2.1.1.4 1.3.6.1.2.1.1.4 s test ``` -- To test SNMP poller, snmpd needs to be configured to listen on the external IP. 
To enable listening snmpd to external IP, go to the `/etc/snmp/snmpd.conf` configuration file, and replace the IP address `10.0.101.22` with the server IP address where snmpd is configured. +Remember to replace `EXTERNAL-IP` with the ip address of the `snmp-splunk-connect-for-snmp-trap` service from the above. + +2. Search Splunk: You should see one event per trap command with the host value of the test machine `EXTERNAL-IP` IP address. + +``` bash +index="netops" sourcetype="sc4snmp:traps" +``` + +#### Test SNMP Poller + +1. To test SNMP poller, you can either use the device you already have, or configure snmpd on your Linux system. +Snmpd needs to be configured to listen on the external IP. To enable listening snmpd to external IP, go to the `/etc/snmp/snmpd.conf` configuration file, and replace the IP address `10.0.101.22` with the server IP address where snmpd is configured: `agentaddress 10.0.101.22,127.0.0.1,[::1]`. Restart snmpd through the execute command: + ``` bash service snmpd stop service snmpd start ``` -- Configure SC4SNMP Poller to test and add the IP address which you want to poll. Add the configuration entry into the `values.yaml` file by +2. Configure SC4SNMP Poller to test and add the IP address which you want to poll. Add the configuration entry into the `values.yaml` file by replacing the IP address `10.0.101.22` with the server IP address where the snmpd was configured. ``` bash poller: @@ -226,18 +147,13 @@ poller: 10.0.101.22,,2c,public,,,42000,,, ``` -- Load `values.yaml` file into SC4SNMP +3. Load `values.yaml` file into SC4SNMP ``` bash microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` -- Check-in Splunk - -Before polling starts, SC4SNMP must perform SNMP WALK process on the device. It is the run first time after configuring the new device, and then the run time in every `walk_interval`. 
-Its purpose is to gather all the data and provide meaningful context for the polling records. For example, it might report that your device is so large that the walk takes too long, so the scope of walking needs to be limited.
-In such cases, enable the small walk. See: [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
-When the walk finishes, events appear in Splunk. Confirm the walk with the following queries:
+4. Verify if the records appeared in Splunk:
 
 ``` bash
 index="netops" sourcetype="sc4snmp:event"
@@ -246,3 +162,21 @@ index="netops" sourcetype="sc4snmp:event"
 ``` bash
 | mpreview index="netmetrics" | search sourcetype="sc4snmp:metric"
 ```
+
+NOTE: Before polling starts, SC4SNMP must perform the SNMP WALK process on the device. It is run for the first time after configuring the new device, and then it runs again every `walk_interval`.
+Its purpose is to gather all the data and provide meaningful context for the polling records. For example, it might report that your device is so large that the walk takes too long, so the scope of walking needs to be limited.
+In such cases, enable the small walk. See: [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
+When the walk finishes, events appear in Splunk.
+
+## Next Steps
+
+A good way to start with SC4SNMP polling is to follow the [Step by Step guide for polling](../configuration/step-by-step-poll.md).
+Advanced configuration of polling is available in [Poller configuration](../configuration/poller-configuration.md) section.
+SNMP data format is explained in [SNMP data format](../configuration/snmp-data-format.md) section.
+
+For advanced trap configuration, check the [Traps configuration](../configuration/trap-configuration.md) section.
+ + + +[examples_link]: https://github.com/splunk/splunk-connect-for-snmp/tree/main/examples +[basic_template_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/basic_template.md \ No newline at end of file diff --git a/docs/mib-request.md b/docs/mib-request.md index 07a1f5b4a..10d4fb09c 100644 --- a/docs/mib-request.md +++ b/docs/mib-request.md @@ -63,7 +63,9 @@ In order to add your MIB files to the MIB server in standalone SC4SNMP installat `/home/user/local_mibs/VENDOR1` and `/home/user/local_mibs/VENDOR2` and put files inside accordingly. Putting wrong vendor names won't make compilation fail, this is more for the logging purposes. Segregating your files will make troubleshooting easier. -3. Add following config to the `values.yaml`: +3. MIB files should be named the same as the contained MIB module. The MIB module name is specified at the beginning of +the MIB file before `::= BEGIN` keyword. +4. Add following config to the `values.yaml`: ```yaml mibserver: diff --git a/docs/offlineinstallation/offline-sc4snmp.md b/docs/offlineinstallation/offline-sc4snmp.md index 22bdea224..f34252689 100644 --- a/docs/offlineinstallation/offline-sc4snmp.md +++ b/docs/offlineinstallation/offline-sc4snmp.md @@ -67,105 +67,7 @@ image: pullPolicy: "Never" ``` -Example `values.yaml` file: -```yaml -splunk: - enabled: true - protocol: https - host: ###SPLUNK_HOST### - token: ###SPLUNK_TOKEN### - insecureSSL: "false" - port: "###SPLUNK_PORT###" -image: - tag: ###TAG### - pullPolicy: "Never" -traps: - communities: - 2c: - - public - - homelab - #usernameSecrets: - # - sc4snmp-hlab-sha-aes - # - sc4snmp-hlab-sha-des - - #loadBalancerIP: The IP address in the metallb pool - loadBalancerIP: ###X.X.X.X### -worker: - # There are 3 types of workers - trap: - # replicaCount: number of trap-worker pods which consumes trap tasks - replicaCount: 2 - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - 
# minReplicas: 2 - # maxReplicas: 40 - # targetCPUUtilizationPercentage: 80 - poller: - # replicaCount: number of poller-worker pods which consumes polling tasks - replicaCount: 2 - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 40 - # targetCPUUtilizationPercentage: 80 - sender: - # replicaCount: number of sender-worker pods which consumes sending tasks - replicaCount: 1 - # autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 40 - # targetCPUUtilizationPercentage: 80 - # udpConnectionTimeout: timeout in seconds for SNMP operations - #udpConnectionTimeout: 5 - logLevel: "INFO" -scheduler: - logLevel: "INFO" -# profiles: | -# generic_switch: -# frequency: 300 -# varBinds: -# - ['SNMPv2-MIB', 'sysDescr'] -# - ['SNMPv2-MIB', 'sysName', 0] -# - ['TCP-MIB', 'tcpActiveOpens'] -# - ['TCP-MIB', 'tcpAttemptFails'] -# - ['IF-MIB'] -poller: - # usernameSecrets: - # - sc4snmp-hlab-sha-aes - # - sc4snmp-hlab-sha-des - # inventory: | - # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete - # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,,, - # 10.0.0.199,,2c,public,,,3000,,,True - # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, -sim: - # sim must be enabled if you want to use signalFx - enabled: false - image: - pullPolicy: "Never" -# signalfxToken: BCwaJ_Ands4Xh7Nrg -# signalfxRealm: us0 -mongodb: - image: - pullPolicy: "Never" - pdb: - create: true - persistence: - storageClass: "microk8s-hostpath" - volumePermissions: - enabled: true -redis: - image: - pullPolicy: "Never" -``` - -Fill `###` variables according to the description from [online installation](../gettingstarted/sc4snmp-installation.md#configure-splunk-enterprise-or-splunk-cloud-connection). 
- -Additionally, fill `###TAG###` with the same tag you used before to `docker pull` an SC4SNMP image. +Example `values.yaml` file can be found [here][offline_doc_link]. The next step is to unpack the chart package `splunk-connect-for-snmp-chart.tar`. It will result in creating the `splunk-connect-for-snmp` directory: @@ -178,3 +80,5 @@ Finally, run the helm install command in the directory where both the `values.ya ```bash microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` + +[offline_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/offline_installation_values.md \ No newline at end of file diff --git a/docs/small-environment.md b/docs/small-environment.md index 7315dec7d..6c886738d 100644 --- a/docs/small-environment.md +++ b/docs/small-environment.md @@ -3,140 +3,13 @@ SC4SNMP can be successfully installed in small environments with 2 CPUs and 4 GB of memory. One important thing to remember is that Splunk OpenTelemetry Collector for Kubernetes cannot be installed in such a small environment along with SC4SNMP. The other difference from normal installation is that the `resources` limits must be set for Kubernetes -pods. See the example of `values.yaml` with the appropriate resources below: +pods. See the example of `values.yaml` with the appropriate resources [here][lightweight_doc_link]. 
-```yaml -splunk: - enabled: true - protocol: https - host: ###SPLUNK_HOST### - token: ###SPLUNK_TOKEN### - insecureSSL: "false" - port: "###SPLUNK_PORT###" -image: - pullPolicy: "Always" -traps: - replicaCount: 1 - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 40m - memory: 256Mi - communities: - 2c: - - public - - homelab - #usernameSecrets: - # - sc4snmp-hlab-sha-aes - # - sc4snmp-hlab-sha-des - - #loadBalancerIP: The IP address in the metallb pool - loadBalancerIP: ###X.X.X.X### -worker: - # There are 3 types of workers - trap: - # replicaCount: number of trap-worker pods which consumes trap tasks - replicaCount: 1 - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 40m - memory: 150Mi - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 40 - # targetCPUUtilizationPercentage: 80 - poller: - # replicaCount: number of poller-worker pods which consumes polling tasks - replicaCount: 2 - resources: - limits: - cpu: 200m - memory: 600Mi - requests: - cpu: 60m - memory: 260Mi - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 40 - # targetCPUUtilizationPercentage: 80 - sender: - # replicaCount: number of sender-worker pods which consumes sending tasks - replicaCount: 1 - resources: - limits: - cpu: 100m - memory: 350Mi - requests: - cpu: 30m - memory: 250Mi - # autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 40 - # targetCPUUtilizationPercentage: 80 - # udpConnectionTimeout: timeout in seconds for SNMP operations - #udpConnectionTimeout: 5 - logLevel: "INFO" -scheduler: - logLevel: "INFO" - resources: - limits: - cpu: 40m - memory: 260Mi - requests: - cpu: 20m - memory: 180Mi -# profiles: | -# generic_switch: -# 
frequency: 300 -# varBinds: -# - ['SNMPv2-MIB', 'sysDescr'] -# - ['SNMPv2-MIB', 'sysName', 0] -# - ['TCP-MIB', 'tcpActiveOpens'] -# - ['TCP-MIB', 'tcpAttemptFails'] -# - ['IF-MIB'] -poller: - # usernameSecrets: - # - sc4snmp-hlab-sha-aes - # - sc4snmp-hlab-sha-des - # inventory: | - # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete - # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,,, - # 10.0.0.199,,2c,public,,,3000,,,True - # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, -sim: - # sim must be enabled if you want to use signalFx - enabled: false -# signalfxToken: BCwaJ_Ands4Xh7Nrg -# signalfxRealm: us0 -mongodb: - pdb: - create: true - persistence: - storageClass: "microk8s-hostpath" - volumePermissions: - enabled: true -inventory: - resources: - limits: - cpu: 60m - memory: 300Mi - requests: - cpu: 20m -``` The rest of the installation is the same as in [online](gettingstarted/sc4snmp-installation.md), or the [offline](offlineinstallation/offline-sc4snmp.md) installation. Keep in mind that a lightweight instance of SC4SNMP won't be able to poll from many devices and may experience delays if there is frequent polling. 
+ +[lightweight_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/lightweight_installation.yaml \ No newline at end of file diff --git a/examples/basic_template.md b/examples/basic_template.md new file mode 100644 index 000000000..92a731a8f --- /dev/null +++ b/examples/basic_template.md @@ -0,0 +1,91 @@ +## Basic SC4SNMP values.yaml template + +Example 1: Traps and polling functionality enabled, sending data to Splunk: + +```yaml +splunk: + enabled: true + protocol: https + host: ###SPLUNK_HOST### + token: ###SPLUNK_TOKEN### + insecureSSL: "false" + port: "###SPLUNK_PORT###" +traps: + communities: + 2c: + - public + loadBalancerIP: ###TRAP_RECEIVER_IP### +scheduler: + profiles: | + generic_switch: + frequency: 300 + varBinds: + - ['SNMPv2-MIB', 'sysDescr'] + - ['SNMPv2-MIB', 'sysName', 0] + - ['TCP-MIB', 'tcpActiveOpens'] + - ['TCP-MIB', 'tcpAttemptFails'] + - ['IF-MIB'] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + ###POLLED_DEVICE_IP###,,2c,public,,,3000,generic_switch,, +``` + +Example 2: Polling functionality enabled, sending data to SIM: + +```yaml +splunk: + enabled: false +sim: + enabled: true + signalfxToken: ###SIGNALFX_TOKEN### + signalfxRealm: ###SIGNALFX_REALM### +scheduler: + profiles: | + generic_switch: + frequency: 300 + varBinds: + - ['SNMPv2-MIB', 'sysDescr'] + - ['SNMPv2-MIB', 'sysName', 0] + - ['TCP-MIB', 'tcpActiveOpens'] + - ['TCP-MIB', 'tcpAttemptFails'] + - ['IF-MIB'] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + ###POLLED_DEVICE_IP###,,2c,public,,,3000,generic_switch,, +``` + +Splunk related placeholders to fill: + +| Placeholder | Description | Example | +|---|---|---| +| ###SPLUNK_HOST### | host address of splunk instance | "i-08c221389a3b9899a.ec2.splunkit.io" | +| ###SPLUNK_PORT### | port number of splunk instance | "8088" | +| ###SPLUNK_TOKEN### | 
Splunk HTTP Event Collector token | 450a69af-16a9-4f87-9628-c26f04ad3785 |
+
+Splunk optional variables can be configured:
+
+| variable | description | default |
+| --- | --- | --- |
+| splunk.protocol | protocol of splunk instance | https |
+| splunk.insecure_ssl | is insecure ssl allowed | "true" |
+| splunk.eventIndex | name of the events index | "netops" |
+| splunk.metricsIndex | name of the metrics index | "netmetrics" |
+
+Splunk Infrastructure Monitoring placeholders to fill:
+
+| Placeholder | Description | Example |
+| --- | --- | -- |
+| ###SIGNALFX_TOKEN### | SIM token which can be used for ingesting data via API | nBCsdc_Ands4Xh7Nrg |
+| ###SIGNALFX_REALM### | Realm of SIM | us1 |
+
+Shared placeholders to fill:
+
+| Placeholder | Description | Example |
+|------------------------|---------------------------------------|---------------|
+| ###TRAP_RECEIVER_IP### | SHARED IP address used for SNMP Traps | 10.202.18.166 |
+| ###POLLED_DEVICE_IP### | IP address of the device to poll from | 56.22.180.166 |
+
+Note: In case of standalone SC4SNMP installation, `###TRAP_RECEIVER_IP###` should be the IP address of the machine
+where SC4SNMP is installed.
\ No newline at end of file diff --git a/examples/lightweight_installation.yaml b/examples/lightweight_installation.yaml new file mode 100644 index 000000000..8c89d967f --- /dev/null +++ b/examples/lightweight_installation.yaml @@ -0,0 +1,87 @@ +splunk: + enabled: true + protocol: https + host: ###SPLUNK_HOST### + token: ###SPLUNK_TOKEN### + insecureSSL: "false" + port: "###SPLUNK_PORT###" +image: + pullPolicy: "Always" +traps: + communities: + 2c: + - public + - homelab + replicaCount: 1 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 40m + memory: 256Mi + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: ###TRAP_RECEIVER_IP### +worker: + trap: + replicaCount: 1 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 40m + memory: 150Mi + poller: + replicaCount: 2 + resources: + limits: + cpu: 200m + memory: 600Mi + requests: + cpu: 60m + memory: 260Mi + sender: + replicaCount: 1 + resources: + limits: + cpu: 100m + memory: 350Mi + requests: + cpu: 30m + memory: 250Mi + logLevel: "INFO" +scheduler: + logLevel: "INFO" + resources: + limits: + cpu: 40m + memory: 260Mi + requests: + cpu: 20m + memory: 180Mi + profiles: | + generic_switch: + frequency: 300 + varBinds: + - ['SNMPv2-MIB', 'sysDescr'] + - ['SNMPv2-MIB', 'sysName', 0] + - ['TCP-MIB', 'tcpActiveOpens'] + - ['TCP-MIB', 'tcpAttemptFails'] + - ['IF-MIB'] +poller: + usernameSecrets: + - sc4snmp-hlab-sha-aes + - sc4snmp-hlab-sha-des + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,generic_switch,, + 10.0.0.199,,2c,public,,,3000,,,True + 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,generic_switch,, +inventory: + resources: + limits: + cpu: 60m + memory: 300Mi + requests: + cpu: 20m \ No newline at end of file diff --git a/examples/o11y_values.yaml b/examples/o11y_values.yaml new file mode 100644 index 000000000..88d5a6e4d --- /dev/null +++ 
b/examples/o11y_values.yaml @@ -0,0 +1,45 @@ +splunk: + # splunk integration is enabled by default, so in order to use only o11y, you need to disable splunk + # alternatively, use both of them at once + enabled: false +sim: + enabled: true + signalfxToken: xxxxxx + signalfxRealm: us0 +scheduler: + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['TCP-MIB'] + - ['IF-MIB'] + - ['IP-MIB'] + IF_profile: + frequency: 600 + varBinds: + - [ 'IF-MIB', 'ifDescr' ] + - [ 'IF-MIB', 'ifAdminStatus' ] + - [ 'IF-MIB', 'ifName' ] + - [ 'IF-MIB','ifAlias' ] + - [ 'IF-MIB', 'ifInDiscards' ] + - [ 'IF-MIB', 'ifInErrors' ] + - [ 'IF-MIB', 'ifInNUcastPkts' ] + - [ 'IF-MIB', 'ifInOctets' ] + - [ 'IF-MIB', 'ifInUcastPkts' ] + - [ 'IF-MIB', 'ifInUnknownProtos' ] + - [ 'IF-MIB', 'ifOutDiscards' ] + - [ 'IF-MIB', 'ifOutErrors' ] + - [ 'IF-MIB', 'ifOutNUcastPkts' ] + - [ 'IF-MIB', 'ifOutOctets' ] + - [ 'IF-MIB', 'ifOutQLen' ] + - [ 'IF-MIB', 'ifOutUcastPkts' ] + ICMP_profile: + frequency: 500 + varBinds: + - ['IP-MIB', 'icmp'] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.4.248,,2c,public,,,4000,small_walk;IF_profile;ICMP_profile,, + 54.82.4.249,,2c,public,,,1800,small_walk;IF_profile,, \ No newline at end of file diff --git a/examples/offline_installation_values.md b/examples/offline_installation_values.md new file mode 100644 index 000000000..e72e7634b --- /dev/null +++ b/examples/offline_installation_values.md @@ -0,0 +1,64 @@ +## Offline SC4SNMP values.yaml template + +```yaml +splunk: + enabled: true + protocol: https + host: ###SPLUNK_HOST### + token: ###SPLUNK_TOKEN### + insecureSSL: "false" + port: "###SPLUNK_PORT###" +image: + #Fill ###TAG## with the SC4SNMP version downloaded before with docker pull command + # according to the documentation: https://splunk.github.io/splunk-connect-for-snmp/main/offlineinstallation/offline-sc4snmp/ + tag: ###TAG### + pullPolicy: Never +traps: 
+ communities: + 2c: + - public + - homelab + replicaCount: 1 + loadBalancerIP: ###TRAP_RECEIVER_IP### +worker: + trap: + replicaCount: 1 + poller: + replicaCount: 2 + sender: + replicaCount: 1 + logLevel: "INFO" +scheduler: + logLevel: "INFO" + profiles: | + generic_switch: + frequency: 300 + varBinds: + - ['SNMPv2-MIB', 'sysDescr'] + - ['SNMPv2-MIB', 'sysName', 0] + - ['TCP-MIB', 'tcpActiveOpens'] + - ['TCP-MIB', 'tcpAttemptFails'] + - ['IF-MIB'] +poller: + usernameSecrets: + - sc4snmp-hlab-sha-aes + - sc4snmp-hlab-sha-des + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,generic_switch,, + 10.0.0.199,,2c,public,,,3000,,,True + 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,generic_switch,, +mongodb: + image: + pullPolicy: Never +redis: + image: + pullPolicy: Never +mibserver: + image: + pullPolicy: Never +``` + +Fill `###` variables according to the description from [online installation](https://splunk.github.io/splunk-connect-for-snmp/main/gettingstarted/sc4snmp-installation/#configure-splunk-enterprise-or-splunk-cloud-connection). + +Additionally, fill `###TAG###` ith the same tag used before to `docker pull` an SC4SNMP image. 
\ No newline at end of file diff --git a/examples/polling_and_traps_v3.yaml b/examples/polling_and_traps_v3.yaml new file mode 100644 index 000000000..5f5d47ecd --- /dev/null +++ b/examples/polling_and_traps_v3.yaml @@ -0,0 +1,31 @@ +splunk: + enabled: true + protocol: https + host: i-0d903f60788be4c68.ec2.splunkit.io + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "false" + port: "8088" +traps: + # Remember to create sc4snmp-homesecure-sha-aes and sc4snmp-homesecure-sha-des secrets beforehand + # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/snmpv3-configuration/ + usernameSecrets: + - sc4snmp-homesecure-sha-aes + - sc4snmp-homesecure-sha-des + securityEngineId: + - "80003a8c04" + loadBalancerIP: 10.202.4.202 +scheduler: + profiles: | + switch_profile: + frequency: 60 + varBinds: + - ['IF-MIB'] + - ['UCD-SNMP-MIB'] +poller: + # Remember to create sc4snmp-hlab-sha-aes secret beforehand + # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/snmpv3-configuration/ + usernameSecrets: + - sc4snmp-hlab-sha-aes + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.4.248,,3,public,sc4snmp-hlab-sha-aes,,2000,switch_profile,, \ No newline at end of file diff --git a/examples/polling_groups_values.yaml b/examples/polling_groups_values.yaml new file mode 100644 index 000000000..a2fe73746 --- /dev/null +++ b/examples/polling_groups_values.yaml @@ -0,0 +1,25 @@ +splunk: + enabled: true + protocol: https + host: i-0d903f60788be4c68.ec2.splunkit.io + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "true" + port: "8088" +scheduler: + groups: | + switch_group: + - address: 10.202.4.201 + - address: 10.202.4.202 + - address: 10.202.4.203 + - address: 10.202.4.204 + port: 163 + profiles: | + switch_profile: + frequency: 60 + varBinds: + - ['IF-MIB'] + - ['UCD-SNMP-MIB'] +poller: + inventory: | + 
address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + switch_group,,2c,public,,,2000,switch_profile,, \ No newline at end of file diff --git a/examples/polling_values.yaml b/examples/polling_values.yaml new file mode 100644 index 000000000..dcf193ef6 --- /dev/null +++ b/examples/polling_values.yaml @@ -0,0 +1,52 @@ +splunk: + enabled: true + protocol: https + host: i-0d903f60788be4c68.ec2.splunkit.io + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "false" + port: "8088" +# in the worker section you can adjust scaling parameters +# full description is here: +# https://splunk.github.io/splunk-connect-for-snmp/main/configuration/worker-configuration +worker: + poller: + replicaCount: 5 + sender: + replicaCount: 3 +scheduler: + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ["TCP-MIB"] + - ["IF-MIB"] + - ["IP-MIB"] + IF_profile: + frequency: 600 + varBinds: + - [ 'IF-MIB', 'ifDescr' ] + - [ 'IF-MIB', 'ifAdminStatus' ] + - [ 'IF-MIB', 'ifName' ] + - [ 'IF-MIB', 'ifAlias' ] + - [ 'IF-MIB', 'ifInDiscards' ] + - [ 'IF-MIB', 'ifInErrors' ] + - [ 'IF-MIB', 'ifInNUcastPkts' ] + - [ 'IF-MIB', 'ifInOctets' ] + - [ 'IF-MIB', 'ifInUcastPkts' ] + - [ 'IF-MIB', 'ifInUnknownProtos' ] + - [ 'IF-MIB', 'ifOutDiscards' ] + - [ 'IF-MIB', 'ifOutErrors' ] + - [ 'IF-MIB', 'ifOutNUcastPkts' ] + - [ 'IF-MIB', 'ifOutOctets' ] + - [ 'IF-MIB', 'ifOutQLen' ] + - [ 'IF-MIB', 'ifOutUcastPkts' ] + ICMP_profile: + frequency: 500 + varBinds: + - ["IP-MIB", "icmp"] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.4.248,,2c,public,,,4000,small_walk;IF_profile;ICMP_profile,, + 54.82.4.249,,2c,public,,,1800,small_walk;IF_profile,, \ No newline at end of file diff --git a/examples/traps_enabled_values.yaml b/examples/traps_enabled_values.yaml new file mode 100644 index 000000000..bda9095a7 --- /dev/null +++ b/examples/traps_enabled_values.yaml 
@@ -0,0 +1,13 @@ +splunk: + enabled: true + protocol: https + host: i-0d903f60788be4c68.ec2.splunkit.io + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "false" + port: "8088" +traps: + communities: + 2c: + - public + - homelab + loadBalancerIP: 10.202.6.213 diff --git a/integration_tests/automatic_setup.sh b/integration_tests/automatic_setup.sh index 3c40da642..4ba0318de 100755 --- a/integration_tests/automatic_setup.sh +++ b/integration_tests/automatic_setup.sh @@ -41,9 +41,9 @@ deploy_poetry() { curl -sSL https://install.python-poetry.org | $PYTHON - export PATH="/home/ubuntu/.local/bin:$PATH" poetry install - poetry add -D splunk-sdk - poetry add -D splunklib - poetry add -D pysnmp + poetry add --group dev splunk-sdk + poetry add --group dev splunklib + poetry add --group dev pysnmp } wait_for_pod_initialization() { @@ -136,4 +136,4 @@ poetry run pytest --splunk_host="localhost" --splunk_password="changeme2" \ if [ ! -z "${S3_PATH}" ]; then aws s3 cp /home/ubuntu/splunk-connect-for-snmp/integration_tests/result.xml s3://snmp-integration-tests/$S3_PATH/ aws s3 cp /home/ubuntu/splunk-connect-for-snmp/integration_tests/pytest.log s3://snmp-integration-tests/$S3_PATH/ -fi \ No newline at end of file +fi diff --git a/mkdocs.yml b/mkdocs.yml index 02193dd57..2b429381b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,6 +13,8 @@ markdown_extensions: - codehilite plugins: + - search: + lang: en - mkdocs-video: is_video: True @@ -36,7 +38,6 @@ nav: - Platform Microk8s: "gettingstarted/mk8s/k8s-microk8s.md" - Install Splunk OpenTelemetry Collector for Kubernetes: "gettingstarted/sck-installation.md" - Install SC4SNMP: "gettingstarted/sc4snmp-installation.md" - - High Availability: ha.md - Configuration: - Deployment: "configuration/deployment-configuration.md" - Polling: @@ -52,15 +53,16 @@ nav: - Redis: "configuration/redis-configuration.md" - SNMPv3 configuration: "configuration/snmpv3-configuration.md" - Splunk Infrastructure Monitoring: 
"configuration/sim-configuration.md" + - Offline Installation: + - Install Microk8s: "offlineinstallation/offline-microk8s.md" + - Install Splunk OpenTelemetry Collector for Kubernetes: "offlineinstallation/offline-sck.md" + - Install SC4SNMP: "offlineinstallation/offline-sc4snmp.md" + - Lightweight installation: "small-environment.md" - Planning: "planning.md" - Security: "security.md" - Request MIB: "mib-request.md" - Upgrade SC4SNMP: "upgrade.md" - Troubleshooting : "bestpractices.md" - Releases: "releases.md" - - Offline Installation: - - Install Microk8s: "offlineinstallation/offline-microk8s.md" - - Install Splunk OpenTelemetry Collector for Kubernetes: "offlineinstallation/offline-sck.md" - - Install SC4SNMP: "offlineinstallation/offline-sc4snmp.md" - - Lightweight installation: "small-environment.md" + - High Availability: ha.md diff --git a/poetry.lock b/poetry.lock index 3056035af..8a1e91f88 100644 --- a/poetry.lock +++ b/poetry.lock @@ -25,14 +25,6 @@ category = "main" optional = false python-versions = ">=3.6" -[[package]] -name = "atomicwrites" -version = "1.4.0" -description = "Atomic file writes." 
-category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - [[package]] name = "attrs" version = "22.1.0" @@ -638,17 +630,9 @@ python-versions = ">=3.6.2" [package.dependencies] wcwidth = "*" -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - [[package]] name = "pycryptodomex" -version = "3.15.0" +version = "3.17" description = "Cryptographic library for Python" category = "main" optional = false @@ -737,7 +721,7 @@ docs = ["furo (>=2022.3.4,<2023.0.0)", "myst-parser (>=0.17)", "sphinx (>=4.3.0, [[package]] name = "pysnmp-pyasn1" -version = "1.1.2" +version = "1.1.3" description = "ASN.1 types and codecs" category = "main" optional = false @@ -757,34 +741,40 @@ requests = ">=2.26.0,<3.0.0" [[package]] name = "pysnmplib" -version = "5.0.17" +version = "5.0.21" description = "" category = "main" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "^3.8" +develop = false [package.dependencies] -pycryptodomex = ">=3.11.0,<4.0.0" -pysnmp-pyasn1 = ">=1.0.3,<2.0.0" -pysnmp-pysmi = ">=1.0.4,<2.0.0" +pycryptodomex = "^3.11.0" +pysnmp-pyasn1 = "^1.0.3" +pysnmp-pysmi = "^1.0.4" + +[package.source] +type = "git" +url = "https://github.com/pysnmp/pysnmp.git" +reference = "main" +resolved_reference = "8f492d3782b26db88e2ea861da9ece413b074777" [[package]] name = "pytest" -version = "7.1.2" +version = "7.2.1" description = "pytest: simple powerful testing with Python" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = 
">=0.12,<2.0" -py = ">=1.8.2" -tomli = ">=1.0.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] @@ -1138,7 +1128,7 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>= [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "cc3dcb63c4c1c47e5a8fc1b956f37a9cc828aea637d62f48ae59956925fc82d9" +content-hash = "cc8ac61bad91cfe1a3a6f28bbd7faf0e1c0b27921183579a1bdadf57c008373a" [metadata.files] amqp = [ @@ -1153,10 +1143,6 @@ async-timeout = [ {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, ] -atomicwrites = [ - {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, - {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, -] attrs = [ {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, @@ -1418,44 +1404,40 @@ prompt-toolkit = [ {file = "prompt_toolkit-3.0.30-py3-none-any.whl", hash = "sha256:d8916d3f62a7b67ab353a952ce4ced6a1d2587dfe9ef8ebc30dd7c386751f289"}, {file = "prompt_toolkit-3.0.30.tar.gz", hash = "sha256:859b283c50bde45f5f97829f77a4674d1c1fcd88539364f1b28a37805cfd89c0"}, ] -py = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] pycryptodomex = [ - 
{file = "pycryptodomex-3.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6f5b6ba8aefd624834bc177a2ac292734996bb030f9d1b388e7504103b6fcddf"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:4540904c09704b6f831059c0dfb38584acb82cb97b0125cd52688c1f1e3fffa6"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0fadb9f7fa3150577800eef35f62a8a24b9ddf1563ff060d9bd3af22d3952c8c"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:fc9bc7a9b79fe5c750fc81a307052f8daabb709bdaabb0fb18fb136b66b653b5"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f8be976cec59b11f011f790b88aca67b4ea2bd286578d0bd3e31bcd19afcd3e4"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:78d9621cf0ea35abf2d38fa2ca6d0634eab6c991a78373498ab149953787e5e5"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:7db44039cc8b449bd08ab338a074e87093bd170f1a1b76d2fcef8a3e2ee11199"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-win32.whl", hash = "sha256:b6306403228edde6e289f626a3908a2f7f67c344e712cf7c0a508bab3ad9e381"}, - {file = "pycryptodomex-3.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:48697790203909fab02a33226fda546604f4e2653f9d47bc5d3eb40879fa7c64"}, - {file = "pycryptodomex-3.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:18e2ab4813883ae63396c0ffe50b13554b32bb69ec56f0afaf052e7a7ae0d55b"}, - {file = "pycryptodomex-3.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:3709f13ca3852b0b07fc04a2c03b379189232b24007c466be0f605dd4723e9d4"}, - {file = "pycryptodomex-3.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:191e73bc84a8064ad1874dba0ebadedd7cce4dedee998549518f2c74a003b2e1"}, - {file = "pycryptodomex-3.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:e3164a18348bd53c69b4435ebfb4ac8a4076291ffa2a70b54f0c4b80c7834b1d"}, - {file = 
"pycryptodomex-3.15.0-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:5676a132169a1c1a3712edf25250722ebc8c9102aa9abd814df063ca8362454f"}, - {file = "pycryptodomex-3.15.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:781efd04ea6762bb2ef7d4fa632c9c89895433744b6c345bd0c239d5ab058dfc"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:e2b12968522a0358b8917fc7b28865acac002f02f4c4c6020fcb264d76bfd06d"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-manylinux1_i686.whl", hash = "sha256:e47bf8776a7e15576887f04314f5228c6527b99946e6638cf2f16da56d260cab"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:996e1ba717077ce1e6d4849af7a1426f38b07b3d173b879e27d5e26d2e958beb"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-manylinux2010_i686.whl", hash = "sha256:65204412d0c6a8e3c41e21e93a5e6054a74fea501afa03046a388cf042e3377a"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:dd452a5af7014e866206d41751886c9b4bf379a339fdf2dbfc7dd16c0fb4f8e0"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:b9279adc16e4b0f590ceff581f53a80179b02cba9056010d733eb4196134a870"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:04a5d6a17560e987272fc1763e9772a87689a08427b8cbdebe3ca7cba95d6156"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-win32.whl", hash = "sha256:46b3f05f2f7ac7841053da4e0f69616929ca3c42f238c405f6c3df7759ad2780"}, - {file = "pycryptodomex-3.15.0-cp35-abi3-win_amd64.whl", hash = "sha256:8eecdf9cdc7343001d047f951b9cc805cd68cb6cd77b20ea46af5bffc5bd3dfb"}, - {file = "pycryptodomex-3.15.0-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:67e1e6a92151023ccdfcfbc0afb3314ad30080793b4c27956ea06ab1fb9bcd8a"}, - {file = "pycryptodomex-3.15.0-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:c4cb9cb492ea7dcdf222a8d19a1d09002798ea516aeae8877245206d27326d86"}, - {file = 
"pycryptodomex-3.15.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:94c7b60e1f52e1a87715571327baea0733708ab4723346598beca4a3b6879794"}, - {file = "pycryptodomex-3.15.0-pp27-pypy_73-win32.whl", hash = "sha256:04cc393045a8f19dd110c975e30f38ed7ab3faf21ede415ea67afebd95a22380"}, - {file = "pycryptodomex-3.15.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0776bfaf2c48154ab54ea45392847c1283d2fcf64e232e85565f858baedfc1fa"}, - {file = "pycryptodomex-3.15.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:463119d7d22d0fc04a0f9122e9d3e6121c6648bcb12a052b51bd1eed1b996aa2"}, - {file = "pycryptodomex-3.15.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a07a64709e366c2041cd5cfbca592b43998bf4df88f7b0ca73dca37071ccf1bd"}, - {file = "pycryptodomex-3.15.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:35a8f7afe1867118330e2e0e0bf759c409e28557fb1fc2fbb1c6c937297dbe9a"}, - {file = "pycryptodomex-3.15.0.tar.gz", hash = "sha256:7341f1bb2dadb0d1a0047f34c3a58208a92423cdbd3244d998e4b28df5eac0ed"}, + {file = "pycryptodomex-3.17-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:12056c38e49d972f9c553a3d598425f8a1c1d35b2e4330f89d5ff1ffb70de041"}, + {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab33c2d9f275e05e235dbca1063753b5346af4a5cac34a51fa0da0d4edfb21d7"}, + {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:caa937ff29d07a665dfcfd7a84f0d4207b2ebf483362fa9054041d67fdfacc20"}, + {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:db23d7341e21b273d2440ec6faf6c8b1ca95c8894da612e165be0b89a8688340"}, + {file = "pycryptodomex-3.17-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:f854c8476512cebe6a8681cc4789e4fcff6019c17baa0fd72b459155dc605ab4"}, + {file = "pycryptodomex-3.17-cp27-cp27m-win32.whl", hash = "sha256:a57e3257bacd719769110f1f70dd901c5b6955e9596ad403af11a3e6e7e3311c"}, + {file = "pycryptodomex-3.17-cp27-cp27m-win_amd64.whl", hash = 
"sha256:d38ab9e53b1c09608ba2d9b8b888f1e75d6f66e2787e437adb1fecbffec6b112"}, + {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:3c2516b42437ae6c7a29ef3ddc73c8d4714e7b6df995b76be4695bbe4b3b5cd2"}, + {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5c23482860302d0d9883404eaaa54b0615eefa5274f70529703e2c43cc571827"}, + {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:7a8dc3ee7a99aae202a4db52de5a08aa4d01831eb403c4d21da04ec2f79810db"}, + {file = "pycryptodomex-3.17-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:7cc28dd33f1f3662d6da28ead4f9891035f63f49d30267d3b41194c8778997c8"}, + {file = "pycryptodomex-3.17-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:2d4d395f109faba34067a08de36304e846c791808524614c731431ee048fe70a"}, + {file = "pycryptodomex-3.17-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:55eed98b4150a744920597c81b3965b632038781bab8a08a12ea1d004213c600"}, + {file = "pycryptodomex-3.17-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:7fa0b52df90343fafe319257b31d909be1d2e8852277fb0376ba89d26d2921db"}, + {file = "pycryptodomex-3.17-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f0ddd4adc64baa39b416f3637aaf99f45acb0bcdc16706f0cc7ebfc6f10109"}, + {file = "pycryptodomex-3.17-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4fa037078e92c7cc49f6789a8bac3de06856740bb2038d05f2d9a2e4b165d59"}, + {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:88b0d5bb87eaf2a31e8a759302b89cf30c97f2f8ca7d83b8c9208abe8acb447a"}, + {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:6feedf4b0e36b395329b4186a805f60f900129cdf0170e120ecabbfcb763995d"}, + {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7a6651a07f67c28b6e978d63aa3a3fccea0feefed9a8453af3f7421a758461b7"}, + {file = 
"pycryptodomex-3.17-cp35-abi3-win32.whl", hash = "sha256:32e764322e902bbfac49ca1446604d2839381bbbdd5a57920c9daaf2e0b778df"}, + {file = "pycryptodomex-3.17-cp35-abi3-win_amd64.whl", hash = "sha256:4b51e826f0a04d832eda0790bbd0665d9bfe73e5a4d8ea93b6a9b38beeebe935"}, + {file = "pycryptodomex-3.17-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:d4cf0128da167562c49b0e034f09e9cedd733997354f2314837c2fa461c87bb1"}, + {file = "pycryptodomex-3.17-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:c92537b596bd5bffb82f8964cabb9fef1bca8a28a9e0a69ffd3ec92a4a7ad41b"}, + {file = "pycryptodomex-3.17-pp27-pypy_73-win32.whl", hash = "sha256:599bb4ae4bbd614ca05f49bd4e672b7a250b80b13ae1238f05fd0f09d87ed80a"}, + {file = "pycryptodomex-3.17-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4c4674f4b040321055c596aac926d12f7f6859dfe98cd12f4d9453b43ab6adc8"}, + {file = "pycryptodomex-3.17-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a3648025e4ddb72d43addab764336ba2e670c8377dba5dd752e42285440d31"}, + {file = "pycryptodomex-3.17-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40e8a11f578bd0851b02719c862d55d3ee18d906c8b68a9c09f8c564d6bb5b92"}, + {file = "pycryptodomex-3.17-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:23d83b610bd97704f0cd3acc48d99b76a15c8c1540d8665c94d514a49905bad7"}, + {file = "pycryptodomex-3.17-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd29d35ac80755e5c0a99d96b44fb9abbd7e871849581ea6a4cb826d24267537"}, + {file = "pycryptodomex-3.17-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64b876d57cb894b31056ad8dd6a6ae1099b117ae07a3d39707221133490e5715"}, + {file = "pycryptodomex-3.17-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee8bf4fdcad7d66beb744957db8717afc12d176e3fd9c5d106835133881a049b"}, + {file = 
"pycryptodomex-3.17-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c84689c73358dfc23f9fdcff2cb9e7856e65e2ce3b5ed8ff630d4c9bdeb1867b"}, + {file = "pycryptodomex-3.17.tar.gz", hash = "sha256:0af93aad8d62e810247beedef0261c148790c52f3cd33643791cc6396dd217c1"}, ] pydantic = [ {file = "pydantic-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:221166d99726238f71adc4fa9f3e94063a10787574b966f86a774559e709ac5a"}, @@ -1620,20 +1602,17 @@ pyrate-limiter = [ {file = "pyrate-limiter-2.8.1.tar.gz", hash = "sha256:0741b7db4b3facdce60bd836e0fcc43911bc52c443f674f924afba7e02e79c18"}, ] pysnmp-pyasn1 = [ - {file = "pysnmp-pyasn1-1.1.2.tar.gz", hash = "sha256:b3345370fcb49e03ecc78b336a3f940e0c0c1409d702f64af1fe165b61e37b35"}, - {file = "pysnmp_pyasn1-1.1.2-py3-none-any.whl", hash = "sha256:de4132bc2931a7b32277aac1ce8e7650db137199df087a376530153b1b1b4d8d"}, + {file = "pysnmp-pyasn1-1.1.3.tar.gz", hash = "sha256:fc559133ec6717e9d96dd4bd69c981310b23364dc2280a9b5f40f684fb6b4b8a"}, + {file = "pysnmp_pyasn1-1.1.3-py3-none-any.whl", hash = "sha256:d9a471b058adb9f2c3ce3aa85f800f2beef1a86c03b08d182a5653c9880fbd5e"}, ] pysnmp-pysmi = [ {file = "pysnmp-pysmi-1.1.10.tar.gz", hash = "sha256:0149c5772e6151f6286f546058da3e1203771d46c9b8b53b568bf1c44267506f"}, {file = "pysnmp_pysmi-1.1.10-py3-none-any.whl", hash = "sha256:6526b2bda6ca5f01f1c0ac2c8ff01cb34e0eec3c9fe887decd86dc78121ce52c"}, ] -pysnmplib = [ - {file = "pysnmplib-5.0.17-py3-none-any.whl", hash = "sha256:2400e2c7776e7653b2edac5a5d35d5aa959bd0dad54d7b06d7b95b89312d5e64"}, - {file = "pysnmplib-5.0.17.tar.gz", hash = "sha256:73fd976f2608597776890c69a1539c9967f5ddd9ca6836cc5b0d1915e7a17ad8"}, -] +pysnmplib = [] pytest = [ - {file = "pytest-7.1.2-py3-none-any.whl", hash = "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c"}, - {file = "pytest-7.1.2.tar.gz", hash = "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"}, + {file = "pytest-7.2.1-py3-none-any.whl", hash = 
"sha256:c7c6ca206e93355074ae32f7403e8ea12163b1163c976fee7d4d84027c162be5"}, + {file = "pytest-7.2.1.tar.gz", hash = "sha256:d45e0952f3727241918b8fd0f376f5ff6b301cc0777c6f9a556935c92d8a7d42"}, ] pytest-cov = [ {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, diff --git a/pyproject.toml b/pyproject.toml index bf63f0b10..696d89d33 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "splunk-connect-for-snmp" -version = "1.8.6" +version = "1.9.0-beta.1" description = "" authors = ["omrozowicz-splunk "] license = "Apache-2.0" @@ -36,7 +36,6 @@ requests-cache = "^0.9.3" requests-ratelimiter = "^0.2.1" mongoengine = "^0.24.1" celery-redbeat = {git = "https://github.com/splunk/redbeat", rev = "main"} -pysnmplib = "^5.0.5" PyYAML = "^6.0" #Note this is temporary PR to upstream project is issued wait-for-dep = {extras = ["redis"], git="https://github.com/omrozowicz-splunk/wait-for-dep.git"} @@ -45,9 +44,10 @@ pika = "^1.2.0" JSON-log-formatter ="^0.5.1" "ruamel.yaml" = "^0.17.21" mkdocs-video = "^1.3.0" +pysnmplib = {git = "https://github.com/pysnmp/pysnmp.git", rev = "main"} -[tool.poetry.dev-dependencies] -pytest = "^7.1.1" +[tool.poetry.group.dev.dependencies] +pytest = "^7.2.1" pytest-cov = "^3.0.0" mike = "^1.0.1" mkdocs = "^1.2.2" diff --git a/render_manifests.sh b/render_manifests.sh new file mode 100755 index 000000000..682b4e3ec --- /dev/null +++ b/render_manifests.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +TEST_CASES=("only_polling" "only_traps" "autoscaling_enabled" "autoscaling_enabled_deprecated") +for test_case in "${TEST_CASES[@]}" + do + VALUES_FILE=rendered/values_"${test_case}".yaml + MANIFEST_DIR=rendered/manifests/tests_"${test_case}" + helm template --values "${VALUES_FILE}" --output-dir "${MANIFEST_DIR}" -n default charts/splunk-connect-for-snmp + rm -rf "${MANIFEST_DIR}"/splunk-connect-for-snmp/charts + done \ No newline at end of file diff --git 
a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-config.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-config.yaml index 90f2e6174..2f8987642 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-config.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-config.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: splunk-connect-for-snmp-config labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm data: config.yaml: |- diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml index 957ac030a..11d8c383c 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: splunk-connect-for-snmp-inventory labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm data: inventory.csv: | diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/splunk-secret.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/splunk-secret.yaml new file mode 100644 index 000000000..21e689f0a --- /dev/null +++ 
b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/splunk-secret.yaml @@ -0,0 +1,9 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/splunk-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: splunk-connect-for-snmp-splunk +type: Opaque +data: + hec_token: "MDAwMDAwMDAtMDAwMC0wMDAwLTAwMDAtMDAwMDAwMDAwMDAw" diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/traps-config.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/traps-config.yaml index 0ce5607ee..a9a337011 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/traps-config.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/traps-config.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: splunk-connect-for-snmp-traps labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm data: config.yaml: |- diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml index 5b9287b87..4af72f285 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -5,10 +5,10 @@ kind: Job metadata: name: release-name-splunk-connect-for-snmp-inventory labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-inventory app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: ttlSecondsAfterFinished: 300 @@ -21,7 
+21,7 @@ spec: spec: containers: - name: splunk-connect-for-snmp-inventory - image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.5" + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.7-beta.9" imagePullPolicy: Always args: ["inventory"] diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 0559a09a8..a7a09ea68 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -5,13 +5,13 @@ kind: Deployment metadata: name: release-name-splunk-connect-for-snmp-scheduler labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: - replicas: + replicas: 1 selector: matchLabels: app.kubernetes.io/name: splunk-connect-for-snmp-scheduler @@ -22,7 +22,7 @@ spec: app.kubernetes.io/name: splunk-connect-for-snmp-scheduler app.kubernetes.io/instance: release-name spec: - serviceAccountName: release-name-splunk-connect-for-snmp-scheduler + serviceAccountName: release-name-splunk-connect-for-snmp-user securityContext: fsGroup: 10001 containers: @@ -30,12 +30,12 @@ spec: securityContext: capabilities: drop: - - ALL + - ALL readOnlyRootFilesystem: true - runAsGroup: 10001 runAsNonRoot: true runAsUser: 10001 - image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.5" + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.7-beta.9" imagePullPolicy: Always args: [ diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/pdb.yaml 
b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/pdb.yaml index 0e06cfc27..d4cd4f276 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/pdb.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/pdb.yaml @@ -5,10 +5,10 @@ kind: PodDisruptionBudget metadata: name: release-name-splunk-connect-for-snmp-scheduler labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: minAvailable: 1 diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml deleted file mode 100644 index d1a97fbb6..000000000 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# Source: splunk-connect-for-snmp/templates/scheduler/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: release-name-splunk-connect-for-snmp-scheduler - labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 - app.kubernetes.io/name: splunk-connect-for-snmp-scheduler - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" - app.kubernetes.io/managed-by: Helm diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/serviceaccount.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/serviceaccount.yaml new file mode 100644 index 000000000..962cbed68 --- /dev/null +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +--- +# Source: splunk-connect-for-snmp/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 
release-name-splunk-connect-for-snmp-user + labels: + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" + app.kubernetes.io/managed-by: Helm diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/sim/pdb.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/sim/pdb.yaml index f37ed896d..46274afa8 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/sim/pdb.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/sim/pdb.yaml @@ -5,10 +5,10 @@ kind: PodDisruptionBudget metadata: name: release-name-splunk-connect-for-snmp-sim labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-sim app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: minAvailable: 80% diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml index a6f13ad5d..a525db4fa 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -5,10 +5,8 @@ kind: Pod metadata: name: "release-name-splunk-connect-for-snmp-trap-test-connection" labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 - app.kubernetes.io/name: splunk-connect-for-snmp-trap - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml 
b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml index d0c67d1f0..9d8cb28b3 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: release-name-splunk-connect-for-snmp-trap labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-trap app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: replicas: 2 @@ -22,7 +22,7 @@ spec: app.kubernetes.io/name: splunk-connect-for-snmp-trap app.kubernetes.io/instance: release-name spec: - serviceAccountName: release-name-splunk-connect-for-snmp-trap + serviceAccountName: release-name-splunk-connect-for-snmp-user securityContext: fsGroup: 10001 containers: @@ -30,12 +30,12 @@ spec: securityContext: capabilities: drop: - - ALL + - ALL readOnlyRootFilesystem: true - runAsGroup: 10001 runAsNonRoot: true runAsUser: 10001 - image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.5" + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.7-beta.9" imagePullPolicy: Always args: [ diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/pdb.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/pdb.yaml index 437a4f48e..fd8f78b1b 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/pdb.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/pdb.yaml @@ -5,10 +5,10 @@ kind: PodDisruptionBudget metadata: name: release-name-splunk-connect-for-snmp-trap labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-trap app.kubernetes.io/instance: release-name - 
app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: minAvailable: 80% diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml index f75b3249a..a64b80fac 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: release-name-splunk-connect-for-snmp-trap labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-trap app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm annotations: metallb.universe.tf/allow-shared-ip: "splunk-connect" diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml deleted file mode 100644 index 84bf6d033..000000000 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# Source: splunk-connect-for-snmp/templates/traps/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: release-name-splunk-connect-for-snmp-trap - labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 - app.kubernetes.io/name: splunk-connect-for-snmp-trap - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" - app.kubernetes.io/managed-by: Helm diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/pdb.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/pdb.yaml index a11764016..b25ead021 
100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/pdb.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/pdb.yaml @@ -5,10 +5,10 @@ kind: PodDisruptionBudget metadata: name: release-name-splunk-connect-for-snmp-worker labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-worker app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: minAvailable: 80% diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index 3cd2680e9..683a425e4 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: release-name-splunk-connect-for-snmp-worker-poller labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-worker-poller app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: replicas: 2 @@ -22,7 +22,7 @@ spec: app.kubernetes.io/name: splunk-connect-for-snmp-worker-poller app.kubernetes.io/instance: release-name spec: - serviceAccountName: release-name-splunk-connect-for-snmp-worker + serviceAccountName: release-name-splunk-connect-for-snmp-user securityContext: fsGroup: 10001 containers: @@ -30,12 +30,12 @@ spec: securityContext: capabilities: drop: - - ALL + - ALL readOnlyRootFilesystem: true - runAsGroup: 10001 runAsNonRoot: true runAsUser: 10001 - image: 
"ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.5" + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.7-beta.9" imagePullPolicy: Always args: [ @@ -47,19 +47,23 @@ spec: - name: REDIS_URL value: redis://release-name-redis-headless:6379/1 - name: SC4SNMP_VERSION - value: 1.8.5 + value: 1.8.7-beta.9 - name: CELERY_BROKER_URL value: redis://release-name-redis-headless:6379/0 - name: MONGO_URI value: mongodb://release-name-mongodb:27017 - name: WALK_RETRY_MAX_INTERVAL - value: "600" + value: "60" - name: METRICS_INDEXING_ENABLED value: "false" + - name: POLL_BASE_PROFILES + value: "true" - name: LOG_LEVEL value: INFO - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: MAX_OID_TO_PROCESS + value: "70" - name: PROFILES_RELOAD_DELAY value: "60" - name: MIB_SOURCES diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 8b0067de9..06d1bfd7f 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: release-name-splunk-connect-for-snmp-worker-sender labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -22,7 +22,7 @@ spec: app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender app.kubernetes.io/instance: release-name spec: - serviceAccountName: release-name-splunk-connect-for-snmp-worker + serviceAccountName: release-name-splunk-connect-for-snmp-user securityContext: fsGroup: 10001 containers: @@ -30,12 +30,12 @@ 
spec: securityContext: capabilities: drop: - - ALL + - ALL readOnlyRootFilesystem: true - runAsGroup: 10001 runAsNonRoot: true runAsUser: 10001 - image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.5" + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.7-beta.9" imagePullPolicy: Always args: [ @@ -47,19 +47,23 @@ spec: - name: REDIS_URL value: redis://release-name-redis-headless:6379/1 - name: SC4SNMP_VERSION - value: 1.8.5 + value: 1.8.7-beta.9 - name: CELERY_BROKER_URL value: redis://release-name-redis-headless:6379/0 - name: MONGO_URI value: mongodb://release-name-mongodb:27017 - name: WALK_RETRY_MAX_INTERVAL - value: "600" + value: "60" - name: METRICS_INDEXING_ENABLED value: "false" + - name: POLL_BASE_PROFILES + value: "true" - name: LOG_LEVEL value: INFO - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: MAX_OID_TO_PROCESS + value: "70" - name: PROFILES_RELOAD_DELAY value: "60" - name: MIB_SOURCES diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml deleted file mode 100644 index aa90eba11..000000000 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# Source: splunk-connect-for-snmp/templates/worker/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: release-name-splunk-connect-for-snmp-worker - labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 - app.kubernetes.io/name: splunk-connect-for-snmp-worker - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" - app.kubernetes.io/managed-by: Helm diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 79a7297d2..28b4be9f7 100644 --- 
a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: release-name-splunk-connect-for-snmp-worker-trap labels: - helm.sh/chart: splunk-connect-for-snmp-1.8.5 app.kubernetes.io/name: splunk-connect-for-snmp-worker-trap app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.8.5" + helm.sh/chart: splunk-connect-for-snmp-1.8.7-beta.9 + app.kubernetes.io/version: "1.8.7-beta.9" app.kubernetes.io/managed-by: Helm spec: replicas: 2 @@ -22,7 +22,7 @@ spec: app.kubernetes.io/name: splunk-connect-for-snmp-worker-trap app.kubernetes.io/instance: release-name spec: - serviceAccountName: release-name-splunk-connect-for-snmp-worker + serviceAccountName: release-name-splunk-connect-for-snmp-user securityContext: fsGroup: 10001 containers: @@ -30,12 +30,12 @@ spec: securityContext: capabilities: drop: - - ALL + - ALL readOnlyRootFilesystem: true - runAsGroup: 10001 runAsNonRoot: true runAsUser: 10001 - image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.5" + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.8.7-beta.9" imagePullPolicy: Always args: [ @@ -47,19 +47,23 @@ spec: - name: REDIS_URL value: redis://release-name-redis-headless:6379/1 - name: SC4SNMP_VERSION - value: 1.8.5 + value: 1.8.7-beta.9 - name: CELERY_BROKER_URL value: redis://release-name-redis-headless:6379/0 - name: MONGO_URI value: mongodb://release-name-mongodb:27017 - name: WALK_RETRY_MAX_INTERVAL - value: "600" + value: "60" - name: METRICS_INDEXING_ENABLED value: "false" + - name: POLL_BASE_PROFILES + value: "true" - name: LOG_LEVEL value: INFO - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: MAX_OID_TO_PROCESS + value: "70" - name: PROFILES_RELOAD_DELAY value: "60" - name: MIB_SOURCES diff --git a/rendered/values_autoscaling_enabled.yaml 
b/rendered/values_autoscaling_enabled.yaml new file mode 100644 index 000000000..20aba4f0b --- /dev/null +++ b/rendered/values_autoscaling_enabled.yaml @@ -0,0 +1,51 @@ +splunk: + enabled: true + protocol: https + host: 10.202.18.152 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "true" + port: "8088" +traps: + autoscaling: + enabled: true + communities: + 2c: + - public + - homelab + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 10.202.6.213 +worker: + poller: + autoscaling: + enabled: true + trap: + autoscaling: + enabled: true + sender: + autoscaling: + enabled: true +scheduler: + profiles: | + IF_profile: + frequency: 600 + varBinds: + - [ "IF-MIB", "ifDescr" ] + - [ "IF-MIB", "ifAdminStatus" ] + - [ "IF-MIB", "ifName" ] + - [ 'IF-MIB','ifAlias' ] + - [ "IF-MIB", "ifInDiscards" ] + - [ "IF-MIB", "ifInErrors" ] + - [ "IF-MIB", "ifInNUcastPkts" ] + - [ "IF-MIB", "ifInOctets" ] + - [ "IF-MIB", "ifInUcastPkts" ] + - [ "IF-MIB", "ifInUnknownProtos" ] + - [ "IF-MIB", "ifOutDiscards" ] + - [ "IF-MIB", "ifOutErrors" ] + - [ "IF-MIB", "ifOutNUcastPkts" ] + - [ "IF-MIB", "ifOutOctets" ] + - [ "IF-MIB", "ifOutQLen" ] + - [ "IF-MIB", "ifOutUcastPkts" ] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.41.24,,2c,public,,,1800,IF_profile,false, \ No newline at end of file diff --git a/rendered/values_autoscaling_enabled_deprecated.yaml b/rendered/values_autoscaling_enabled_deprecated.yaml new file mode 100644 index 000000000..c00c440e3 --- /dev/null +++ b/rendered/values_autoscaling_enabled_deprecated.yaml @@ -0,0 +1,52 @@ +splunk: + enabled: true + protocol: https + host: 10.202.18.152 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "true" + port: "8088" +useDeprecatedAPI: true +traps: + autoscaling: + enabled: true + communities: + 2c: + - public + - homelab + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 10.202.6.213 
+worker: + poller: + autoscaling: + enabled: true + trap: + autoscaling: + enabled: true + sender: + autoscaling: + enabled: true +scheduler: + profiles: | + IF_profile: + frequency: 600 + varBinds: + - [ "IF-MIB", "ifDescr" ] + - [ "IF-MIB", "ifAdminStatus" ] + - [ "IF-MIB", "ifName" ] + - [ 'IF-MIB','ifAlias' ] + - [ "IF-MIB", "ifInDiscards" ] + - [ "IF-MIB", "ifInErrors" ] + - [ "IF-MIB", "ifInNUcastPkts" ] + - [ "IF-MIB", "ifInOctets" ] + - [ "IF-MIB", "ifInUcastPkts" ] + - [ "IF-MIB", "ifInUnknownProtos" ] + - [ "IF-MIB", "ifOutDiscards" ] + - [ "IF-MIB", "ifOutErrors" ] + - [ "IF-MIB", "ifOutNUcastPkts" ] + - [ "IF-MIB", "ifOutOctets" ] + - [ "IF-MIB", "ifOutQLen" ] + - [ "IF-MIB", "ifOutUcastPkts" ] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.41.24,,2c,public,,,1800,IF_profile,false, \ No newline at end of file diff --git a/rendered/values_only_polling.yaml b/rendered/values_only_polling.yaml new file mode 100644 index 000000000..007eb9fed --- /dev/null +++ b/rendered/values_only_polling.yaml @@ -0,0 +1,32 @@ +splunk: + enabled: true + protocol: https + host: 10.202.18.152 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "true" + port: "8088" +scheduler: + profiles: | + IF_profile: + frequency: 600 + varBinds: + - [ "IF-MIB", "ifDescr" ] + - [ "IF-MIB", "ifAdminStatus" ] + - [ "IF-MIB", "ifName" ] + - [ 'IF-MIB','ifAlias' ] + - [ "IF-MIB", "ifInDiscards" ] + - [ "IF-MIB", "ifInErrors" ] + - [ "IF-MIB", "ifInNUcastPkts" ] + - [ "IF-MIB", "ifInOctets" ] + - [ "IF-MIB", "ifInUcastPkts" ] + - [ "IF-MIB", "ifInUnknownProtos" ] + - [ "IF-MIB", "ifOutDiscards" ] + - [ "IF-MIB", "ifOutErrors" ] + - [ "IF-MIB", "ifOutNUcastPkts" ] + - [ "IF-MIB", "ifOutOctets" ] + - [ "IF-MIB", "ifOutQLen" ] + - [ "IF-MIB", "ifOutUcastPkts" ] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 
54.91.99.113,,2c,public,,,1800,IF_profile,false, \ No newline at end of file diff --git a/rendered/values_only_traps.yaml b/rendered/values_only_traps.yaml new file mode 100644 index 000000000..e07b7299a --- /dev/null +++ b/rendered/values_only_traps.yaml @@ -0,0 +1,14 @@ +splunk: + enabled: true + protocol: https + host: 10.202.18.152 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "true" + port: "8088" +traps: + communities: + 2c: + - public + - homelab + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 10.202.6.213 diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py index 5b477db3b..1fbee5798 100644 --- a/splunk_connect_for_snmp/__init__.py +++ b/splunk_connect_for_snmp/__init__.py @@ -15,4 +15,4 @@ # -__version__ = "1.8.6" +__version__ = "1.9.0-beta.1" diff --git a/splunk_connect_for_snmp/common/custom_translations.py b/splunk_connect_for_snmp/common/custom_translations.py index b190fe137..d6b8c1c63 100644 --- a/splunk_connect_for_snmp/common/custom_translations.py +++ b/splunk_connect_for_snmp/common/custom_translations.py @@ -31,6 +31,8 @@ def load_custom_translations(): try: with open(CONFIG_PATH, encoding="utf-8") as file: config_runtime = yaml.safe_load(file) + if not config_runtime: + return None return config_runtime.get("customTranslations") except FileNotFoundError: diff --git a/splunk_connect_for_snmp/common/inventory_processor.py b/splunk_connect_for_snmp/common/inventory_processor.py index 755010777..e50c72aeb 100644 --- a/splunk_connect_for_snmp/common/inventory_processor.py +++ b/splunk_connect_for_snmp/common/inventory_processor.py @@ -139,8 +139,9 @@ def get_group_hosts(self, source_object, group_name): self.inventory_records.append(host_group_object) else: self.logger.warning( - f"Group {group_name} doesn't exist in the configuration. Skipping..." + f"Group {group_name} doesn't exist in the configuration. 
Treating {group_name} as a hostname" ) + self.single_hosts.append(source_object) class InventoryRecordManager: diff --git a/splunk_connect_for_snmp/enrich/tasks.py b/splunk_connect_for_snmp/enrich/tasks.py index 23fec8e15..fa228e9af 100644 --- a/splunk_connect_for_snmp/enrich/tasks.py +++ b/splunk_connect_for_snmp/enrich/tasks.py @@ -127,7 +127,11 @@ def enrich(self, result): if not current_attributes and group_data["fields"]: attributes_collection.update_one( - {"address": address, "group_key_hash": group_key_hash}, + { + "address": address, + "group_key_hash": group_key_hash, + "indexes": group_data.get("indexes", []), + }, {"$set": {"id": group_key, "fields": {}}}, upsert=True, ) diff --git a/splunk_connect_for_snmp/inventory/tasks.py b/splunk_connect_for_snmp/inventory/tasks.py index b513b6745..02422d203 100644 --- a/splunk_connect_for_snmp/inventory/tasks.py +++ b/splunk_connect_for_snmp/inventory/tasks.py @@ -48,6 +48,11 @@ MONGO_DB = os.getenv("MONGO_DB", "sc4snmp") CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") PROFILES_RELOAD_DELAY = int(os.getenv("PROFILES_RELOAD_DELAY", "300")) +POLL_BASE_PROFILES = human_bool(os.getenv("POLL_BASE_PROFILES", "true")) + + +class BadlyFormattedFieldError(Exception): + pass class InventoryTask(Task): @@ -77,8 +82,30 @@ def inventory_setup_poller(self, work): {"address": address}, {"target": True, "state": True, "config": True}, ) - assigned_profiles = assign_profiles(ir, self.profiles, target) + assigned_profiles, computed_conditional_profiles = assign_profiles( + ir, self.profiles, target + ) + for profile in computed_conditional_profiles: + conditional_profile_name = list(profile.keys())[0] + mongo_profile_tag = f"{list(profile.keys())[0]}__{address.replace('.', '|')}" + profile_body = list(profile.values())[0] + try: + new_profile = generate_conditional_profile( + mongo_db, mongo_profile_tag, profile_body, address + ) + except Exception as e: + logger.warning( + f"Profile {conditional_profile_name} for 
{address} couldn't be processed: {e}" + ) + continue + mongo_db.profiles.replace_one( + {mongo_profile_tag: {"$exists": True}}, new_profile, upsert=True + ) + add_profile_to_assigned_list( + assigned_profiles, profile_body["frequency"], mongo_profile_tag + ) + logger.debug(f"Profiles Assigned for host {address}: {assigned_profiles}") active_schedules: list[str] = [] for period in assigned_profiles: task_config = generate_poll_task_definition( @@ -87,7 +114,6 @@ def inventory_setup_poller(self, work): periodic_obj.manage_task(**task_config) periodic_obj.delete_unused_poll_tasks(f"{address}", active_schedules) - # periodic_obj.delete_disabled_poll_tasks() def generate_poll_task_definition( @@ -106,9 +132,18 @@ def generate_poll_task_definition( return task_config +def add_profile_to_assigned_list( + assigned_profiles: dict[int, list[str]], frequency: int, profile_name: str +): + if frequency not in assigned_profiles: + assigned_profiles[frequency] = [] + assigned_profiles[frequency].append(profile_name) + + def assign_profiles(ir, profiles, target): assigned_profiles: dict[int, list[str]] = {} address = transform_address_to_key(ir.address, ir.port) + computed_profiles = [] if ir.smart_profiles: for profile_name, profile in profiles.items(): @@ -116,11 +151,11 @@ def assign_profiles(ir, profiles, target): continue # skip this profile it is static - if profile["condition"]["type"] == "base": + if profile["condition"]["type"] == "base" and POLL_BASE_PROFILES: logger.debug(f"Adding base profile {profile_name}") - if profile["frequency"] not in assigned_profiles: - assigned_profiles[profile["frequency"]] = [] - assigned_profiles[profile["frequency"]].append(profile_name) + add_profile_to_assigned_list( + assigned_profiles, profile["frequency"], profile_name + ) elif profile["condition"]["type"] == "field": logger.debug(f"profile is a field condition {profile_name}") @@ -137,10 +172,10 @@ def assign_profiles(ir, profiles, target): result = re.search(pattern, cs["value"]) 
if result: logger.debug(f"Adding smart profile {profile_name}") - if profile["frequency"] not in assigned_profiles: - assigned_profiles[profile["frequency"]] = [] - assigned_profiles[profile["frequency"]].append( - profile_name + add_profile_to_assigned_list( + assigned_profiles, + profile["frequency"], + profile_name, ) continue @@ -158,12 +193,15 @@ def assign_profiles(ir, profiles, target): logger.warning( f"profile {profile_name} is a smart profile, it does not need to be configured as a static one" ) + elif "conditions" in profile: + computed_profiles.append({profile_name: profile}) + continue if "frequency" not in profile: logger.warning(f"profile {profile_name} does not have frequency") continue - if profile["frequency"] not in assigned_profiles: - assigned_profiles[profile["frequency"]] = [] - assigned_profiles[profile["frequency"]].append(profile_name) + add_profile_to_assigned_list( + assigned_profiles, profile["frequency"], profile_name + ) else: logger.warning( f"profile {profile_name} was assigned for the host: {address}, no such profile in the config" @@ -175,12 +213,11 @@ def assign_profiles(ir, profiles, target): if profile.get("condition", {}).get("type") == "mandatory" ] for m_profile_name, m_profile_frequency in mandatory_profiles: - if m_profile_frequency not in assigned_profiles: - assigned_profiles[m_profile_frequency] = [] - assigned_profiles[m_profile_frequency].append(m_profile_name) + add_profile_to_assigned_list( + assigned_profiles, m_profile_frequency, m_profile_name + ) - logger.debug(f"Profiles Assigned for host {address}: {assigned_profiles}") - return assigned_profiles + return assigned_profiles, computed_profiles def is_smart_profile_valid(profile_name, profile): @@ -222,3 +259,94 @@ def is_smart_profile_valid(profile_name, profile): logger.warning(f"Patterns for profile {profile_name} must be a list") return False return True + + +def filter_condition_on_database(mongo_client, address: str, conditions: list): + attributes = 
mongo_client.attributes + query = create_query(conditions, address) + result = attributes.find( + query, {"address": 1, "group_key_hash": 1, "_id": 0, "indexes": 1} + ) + return list(result) + + +def create_profile(profile_name, frequency, varBinds, records): + # Connecting general fields from varBinds with filtered object indexes + # like ["IF-MIB", "ifDescr"] + [1] = ["IF-MIB", "ifDescr", 1] + varbind_list = [ + varbind + record["indexes"] + for record in records + for varbind in varBinds + if len(varbind) == 2 + ] + profile = {profile_name: {"frequency": frequency, "varBinds": varbind_list}} + return profile + + +def create_query(conditions: typing.List[dict], address: str) -> dict: + + conditional_profiles_mapping = { + "equals": "$eq", + "gt": "$gt", + "lt": "$lt", + "in": "$in", + } + + def _parse_mib_component(field: str) -> str: + mib_component = field.split("|") + if len(mib_component) < 2: + raise BadlyFormattedFieldError(f"Field {field} is badly formatted") + return mib_component[0] + + def _convert_to_float(value: typing.Any, ignore_error=False) -> typing.Any: + try: + return float(value) + except ValueError: + if ignore_error: + return value + else: + raise BadlyFormattedFieldError(f"Value '{value}' should be numeric") + + def _get_value_for_operation(operation: str, value: str) -> typing.Any: + if operation in ["lt", "gt"]: + return _convert_to_float(value) + elif operation == "in": + return [_convert_to_float(v, True) for v in value] + return value + + filters = [] + field = "" + for condition in conditions: + field = condition["field"] + # fields in databases are written in convention "IF-MIB|ifInOctets" + field = field.replace(".", "|") + value = condition["value"] + operation = condition["operation"].lower() + value_for_querying = _get_value_for_operation(operation, value) + mongo_operation = conditional_profiles_mapping.get(operation) + filters.append({f"fields.{field}.value": {mongo_operation: value_for_querying}}) + mib_component = 
_parse_mib_component(field) + return { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": f"^{mib_component}"}}, + *filters, + ] + } + + +def generate_conditional_profile( + mongo_client, profile_name, conditional_profile_body, address +): + profile_conditions = conditional_profile_body.get("conditions") + profile_varbinds = conditional_profile_body.get("varBinds") + profile_frequency = conditional_profile_body.get("frequency") + if not profile_varbinds: + raise BadlyFormattedFieldError(f"No varBinds provided in the profile") + filtered_snmp_objects = filter_condition_on_database( + mongo_client, address, profile_conditions + ) + new_conditional_profile = create_profile( + profile_name, profile_frequency, profile_varbinds, filtered_snmp_objects + ) + return new_conditional_profile diff --git a/splunk_connect_for_snmp/profiles/base.yaml b/splunk_connect_for_snmp/profiles/base.yaml index 70e32c9e7..e433e53ed 100644 --- a/splunk_connect_for_snmp/profiles/base.yaml +++ b/splunk_connect_for_snmp/profiles/base.yaml @@ -13,13 +13,13 @@ BaseDeviceData: condition: type: "mandatory" varBinds: - # Syntax: [ "MIB-Files", "MIB ocelery[tblib]bject name" "MIB index number"] + # Syntax: [ "MIB-Files", "MIB object name" "MIB index number"] - ["SNMPv2-MIB", "sysDescr",0] - ["SNMPv2-MIB", "sysName",0] - ["SNMPv2-MIB", "sysObjectID",0] - ["SNMPv2-MIB", "sysContact",0] - ["SNMPv2-MIB", "sysLocation",0] -EnirchIF: +EnrichIF: frequency: 600 condition: type: "base" diff --git a/splunk_connect_for_snmp/snmp/manager.py b/splunk_connect_for_snmp/snmp/manager.py index c14897a03..20120fb28 100644 --- a/splunk_connect_for_snmp/snmp/manager.py +++ b/splunk_connect_for_snmp/snmp/manager.py @@ -21,6 +21,7 @@ from splunk_connect_for_snmp.common.collection_manager import ProfilesManager from splunk_connect_for_snmp.inventory.loader import transform_address_to_key +from splunk_connect_for_snmp.snmp.varbinds_resolver import ProfileCollection try: from dotenv import load_dotenv @@ 
-61,6 +62,7 @@ CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") PROFILES_RELOAD_DELAY = int(os.getenv("PROFILES_RELOAD_DELAY", "60")) UDP_CONNECTION_TIMEOUT = int(os.getenv("UDP_CONNECTION_TIMEOUT", 3)) +MAX_OID_TO_PROCESS = int(os.getenv("MAX_OID_TO_PROCESS", 70)) DEFAULT_STANDARD_MIBS = [ "HOST-RESOURCES-MIB", @@ -216,20 +218,27 @@ def extract_index_number(index): return index_number -def extract_index_oid_part(varBind): +def extract_indexes(index): """ - Extracts index from OIDs of metrics. + Extracts indexes from OIDs of metrics. Not always MIB files are structurized the way one of the field is a meaningful index. - https://stackoverflow.com/questions/58886693/how-to-standardize-oid-index-retrieval-in-pysnmp - :param varBind: pysnmp object retrieved from a device - :return: str + :param index: pysnmp object retrieved from a device + :return: list """ - object_identity, _ = varBind - mib_node = object_identity.getMibNode() - object_instance_oid = object_identity.getOid() - object_oid = mib_node.getName() - index_part = object_instance_oid[len(object_oid) :] - return str(index_part) + indexes_to_return = [] + if not index: + return [0] + if isinstance(index, tuple): + for element in index: + if isinstance(element._value, bytes): + element_value = ".".join(str(byte) for byte in element._value) + indexes_to_return.append(element_value) + elif isinstance(element._value, tuple): + element_value = list(element) + indexes_to_return += element_value + else: + indexes_to_return.append(element._value) + return indexes_to_return class Poller(Task): @@ -252,6 +261,8 @@ def __init__(self, **kwargs): self.profiles_manager = ProfilesManager(self.mongo_client) self.profiles = self.profiles_manager.return_collection() + self.profiles_collection = ProfileCollection(self.profiles) + self.profiles_collection.process_profiles() self.last_modified = time.time() self.snmpEngine = SnmpEngine() self.already_loaded_mibs = set() @@ -288,6 +299,7 @@ def do_work( if 
time.time() - self.last_modified > PROFILES_RELOAD_DELAY or walk: self.profiles = self.profiles_manager.return_collection() + self.profiles_collection.update(self.profiles) self.last_modified = time.time() logger.debug("Profiles reloaded") @@ -308,7 +320,6 @@ def do_work( return False, {} if varbinds_bulk: - for (errorIndication, errorStatus, errorIndex, varBindTable,) in bulkCmd( self.snmpEngine, authData, @@ -338,18 +349,24 @@ def do_work( ) if varbinds_get: - for (errorIndication, errorStatus, errorIndex, varBindTable,) in getCmd( - self.snmpEngine, authData, transport, contextData, *varbinds_get + # some devices cannot process more OID than X, so it is necessary to divide it on chunks + for varbind_chunk in self.get_varbind_chunk( + varbinds_get, MAX_OID_TO_PROCESS ): - if not _any_failure_happened( - errorIndication, - errorStatus, - errorIndex, - varBindTable, - ir.address, - walk, + for (errorIndication, errorStatus, errorIndex, varBindTable,) in getCmd( + self.snmpEngine, authData, transport, contextData, *varbind_chunk ): - self.process_snmp_data(varBindTable, metrics, address, get_mapping) + if not _any_failure_happened( + errorIndication, + errorStatus, + errorIndex, + varBindTable, + ir.address, + walk, + ): + self.process_snmp_data( + varBindTable, metrics, address, get_mapping + ) for group_key, metric in metrics.items(): if "profiles" in metrics[group_key]: @@ -359,6 +376,10 @@ def do_work( return retry, metrics + def get_varbind_chunk(self, lst, n): + for i in range(0, len(lst), n): + yield lst[i : i + n] + def load_mibs(self, mibs: List[str]) -> None: logger.info(f"loading mib modules {mibs}") for mib in mibs: @@ -389,86 +410,30 @@ def get_var_binds(self, address, walk=False, profiles=[]): bulk_mapping = {} if walk and not profiles: varbinds_bulk.add(ObjectType(ObjectIdentity("1.3.6"))) - else: - needed_mibs = [] - if walk and profiles: - # as we have base profile configured, we need to make sure that those two MIB families are walked - 
required_bulk = {"IF-MIB": None, "SNMPv2-MIB": None} - else: - required_bulk = {} - - # First pass we only look at profiles for a full mib walk - for profile in profiles: - # In case scheduler processes doesn't yet updated profiles information - if profile not in self.profiles: - self.profiles = self.profiles_manager.return_collection() - self.last_modified = time.time() - # Its possible a profile is removed on upgrade but schedule doesn't yet know - if profile in self.profiles and "varBinds" in self.profiles[profile]: - profile_spec = self.profiles[profile] - profile_varbinds = profile_spec["varBinds"] - for vb in profile_varbinds: - if len(vb) == 1: - if vb[0] not in required_bulk: - required_bulk[vb[0]] = None - if not walk: - bulk_mapping[f"{vb[0]}"] = profile - if vb[0] not in needed_mibs: - needed_mibs.append(vb[0]) - else: - logger.warning( - f"There is either profile: {profile} missing from the configuration, or varBinds section not" - f"present inside the profile" - ) - - for profile in profiles: - # Its possible a profile is removed on upgrade but schedule doesn't yet know - if profile in self.profiles and "varBinds" in self.profiles[profile]: - profile_spec = self.profiles[profile] - profile_varbinds = profile_spec["varBinds"] - for vb in profile_varbinds: - if len(vb) == 2: - if vb[0] not in required_bulk or ( - required_bulk[vb[0]] - and vb[1] not in required_bulk[vb[0]] - ): - if vb[0] not in required_bulk: - required_bulk[vb[0]] = [vb[1]] - else: - required_bulk[vb[0]].append(vb[1]) - if not walk: - bulk_mapping[f"{vb[0]}:{vb[1]}"] = profile - - for mib, entries in required_bulk.items(): - if entries is None: - varbinds_bulk.add(ObjectType(ObjectIdentity(mib))) - else: - for entry in entries: - varbinds_bulk.add(ObjectType(ObjectIdentity(mib, entry))) - - for profile in profiles: - # Its possible a profile is removed on upgrade but schedule doesn't yet know - if profile in self.profiles and "varBinds" in self.profiles[profile]: - profile_spec = 
self.profiles[profile] - profile_varbinds = profile_spec["varBinds"] - for vb in profile_varbinds: - if len(vb) == 3: - if vb[0] not in required_bulk or ( - required_bulk[vb[0]] - and vb[1] not in required_bulk[vb[0]] - ): - varbinds_get.add( - ObjectType(ObjectIdentity(vb[0], vb[1], vb[2])) - ) - if not walk: - get_mapping[f"{vb[0]}:{vb[1]}:{vb[2]}"] = profile - self.load_mibs(needed_mibs) + return varbinds_get, get_mapping, varbinds_bulk, bulk_mapping + joined_profile_object = self.profiles_collection.get_polling_info_from_profiles( + profiles, walk + ) + if joined_profile_object: + mib_families = joined_profile_object.get_mib_families() + mib_files_to_load = [ + mib_family + for mib_family in mib_families + if mib_family not in self.already_loaded_mibs + ] + if mib_files_to_load: + self.load_mibs(mib_files_to_load) + ( + varbinds_get, + get_mapping, + varbinds_bulk, + bulk_mapping, + ) = joined_profile_object.return_mapping_and_varbinds() logger.debug(f"host={address} varbinds_get={varbinds_get}") logger.debug(f"host={address} get_mapping={get_mapping}") logger.debug(f"host={address} varbinds_bulk={varbinds_bulk}") logger.debug(f"host={address} bulk_mapping={bulk_mapping}") - return varbinds_get, get_mapping, varbinds_bulk, bulk_mapping def process_snmp_data(self, varBindTable, metrics, target, mapping={}): @@ -485,9 +450,11 @@ def process_snmp_data(self, varBindTable, metrics, target, mapping={}): if isMIBResolved(id): group_key = get_group_key(mib, oid, index) if group_key not in metrics: + indexes = extract_indexes(index) metrics[group_key] = { "metrics": {}, "fields": {}, + "indexes": indexes, } if mapping: metrics[group_key]["profiles"] = [] @@ -500,15 +467,16 @@ def process_snmp_data(self, varBindTable, metrics, target, mapping={}): metric_value = valueAsBest(snmp_val.prettyPrint()) index_number = extract_index_number(index) - oid_index_part = extract_index_oid_part(varBind) metric_value = fill_empty_value(index_number, metric_value, target) profile = None 
if mapping: profile = mapping.get( - f"{mib}:{metric}:{index_number}", - mapping.get(f"{mib}:{metric}", mapping.get(mib)), + id.replace('"', ""), + mapping.get(f"{mib}::{metric}", mapping.get(mib)), ) + if profile and "__" in profile: + profile = profile.split("__")[0] if metric_value == "No more variables left in this MIB View": continue @@ -517,7 +485,6 @@ def process_snmp_data(self, varBindTable, metrics, target, mapping={}): "time": time.time(), "type": metric_type, "value": metric_value, - "index": oid_index_part, "oid": oid, } if profile and profile not in metrics[group_key]["profiles"]: diff --git a/splunk_connect_for_snmp/snmp/varbinds_resolver.py b/splunk_connect_for_snmp/snmp/varbinds_resolver.py new file mode 100644 index 000000000..81693eb9b --- /dev/null +++ b/splunk_connect_for_snmp/snmp/varbinds_resolver.py @@ -0,0 +1,281 @@ +from functools import reduce +from typing import List + +from celery.utils.log import get_task_logger +from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType + +logger = get_task_logger(__name__) + + +class Varbind: + def __init__(self, varbind_list): + # In case object will be initialized by only on word - 1 element list + # like Varbind("IF-MIB") + if isinstance(varbind_list, str): + varbind_list = [varbind_list] + self.list = varbind_list + self.object_identity = ObjectType(ObjectIdentity(*varbind_list)) + + def mapping_key(self): + if len(self.list) == 1: + return self.list[0] + elif len(self.list) == 2: + return f"{self.list[0]}::{self.list[1]}" + else: + mib_prefix = f"{self.list[0]}::{self.list[1]}" + mib_index = ".".join(str(varbind) for varbind in self.list[2:]) + return f"{mib_prefix}.{mib_index}" + + def __repr__(self): + return f"{self.list}" + + +class VarBindContainer: + def __init__(self): + self.map = {} + + def insert_varbind(self, varbind): + """ + This function puts varbind in VarBindContainer. We shouldn't keep descriptive elements here when we have general + ones. 
For example, when we already have ["TCP-MIB"], there's no need to put ["TCP-MIB", "tcpHCOutSegs"], as it + is already polled in scope of ["TCP-MIB"]. + + :param varbind: + :return: + """ + mapping_key = varbind.mapping_key() + if mapping_key in self.map: + print(f"Element {mapping_key} already in the varbind container") + return + if len(varbind.list) > 1: + if varbind.list[0] in self.map: + print( + f"Element {mapping_key} not added as {varbind.list[0]} is already in the varbind container" + ) + return + if len(varbind.list) > 2: + varbind_tmp = Varbind(varbind.list[:2]) + mapping_key_for_two = varbind_tmp.mapping_key() + if mapping_key_for_two in self.map: + print( + f"Element {mapping_key} not added as {mapping_key_for_two} is already in the varbind container" + ) + return + self.map[mapping_key] = varbind + + def return_varbind_keys(self) -> List[str]: + """ + Returns all keys from the map. When the map is: + {'IF-MIB::ifOutOctets': ['IF-MIB', 'ifOutOctets'], + 'IF-MIB::ifInOctets': ['IF-MIB', 'ifInOctets'], + 'TCP-MIB::tcpOutRsts': ['TCP-MIB', 'tcpOutRsts']} + + It will return ['IF-MIB:ifOutOctets', 'IF-MIB:ifInOctets', 'TCP-MIB:tcpOutRsts'] + :return: + """ + return list(self.map.keys()) + + def return_varbind_values(self) -> List[Varbind]: + """ + Returns all values from the map. When the map is: + {'IF-MIB::ifOutOctets': ['IF-MIB', 'ifOutOctets'], + 'IF-MIB::ifInOctets': ['IF-MIB', 'ifInOctets'], + 'TCP-MIB::tcpOutRsts': ['TCP-MIB', 'tcpOutRsts']} + + It will return [['IF-MIB', 'ifOutOctets'], ['IF-MIB', 'ifInOctets'], ['TCP-MIB', 'tcpOutRsts']] + Remember, ['IF-MIB', 'ifOutOctets'] objects represent Varbind structures. + :return: + """ + return list(self.map.values()) + + def get_mib_families(self): + """ + Gathers all MIB families to load it from mibserver whenever they're missing. 
When the map is: + {'IF-MIB::ifOutOctets': ['IF-MIB', 'ifOutOctets'], + 'IF-MIB::ifInOctets': ['IF-MIB', 'ifInOctets'], + 'TCP-MIB::tcpOutRsts': ['TCP-MIB', 'tcpOutRsts']} + + It will return ['IF-MIB, 'TCP-MIB'] + :return: + """ + mib_families = [] + for varbind in self.map.values(): + mib_families.append(varbind.list[0]) + return mib_families + + def get_profile_mapping(self, profile_name): + """ + Prepares a ready structure for a further mapping from a resolved varbind to profile. When the map is: + {'IF-MIB:ifOutOctets': ['IF-MIB', 'ifOutOctets']} and the profile name is "profile" + + it will return {'IF-MIB:ifOutOctets': 'profile'} + :param profile_name: + :return: + """ + varbind_keys = self.return_varbind_keys() + dict_of_keys_and_profiles = {} + for varbind_key in varbind_keys: + dict_of_keys_and_profiles[varbind_key] = profile_name + return dict_of_keys_and_profiles + + def are_parents_in_map(self, varbind): + """ + Checks if something that we want to add to a structure is already in some other VarbindContainer. 
+ :param varbind: + :return: + """ + varbind_root, varbind_field = varbind.split("::") + varbind_field = varbind_field.split(".")[0] + current_varbinds = self.return_varbind_keys() + return ( + varbind_root in current_varbinds + or f"{varbind_root}::{varbind_field}" in current_varbinds + ) + + def __repr__(self): + return f"{self.map}" + + def __add__(self, other): + joined_maps = {} + new_instance = VarBindContainer() + joined_maps.update(self.map) + joined_maps.update(other.map) + key_list = sorted(list(joined_maps.keys()), key=len) + for varbind_key in key_list: + new_instance.insert_varbind(joined_maps.get(varbind_key)) + return new_instance + + def return_varbinds(self): + varbinds = [] + for varbind in self.map: + varbinds += self.map[varbind] + return varbinds + + +class Profile: + def __init__(self, name, profile_dict): + self.name = name + self.type = profile_dict.get("condition", {}).get("type") + self.varbinds = profile_dict.get("varBinds", None) + self.varbinds_bulk = VarBindContainer() + self.varbinds_get = VarBindContainer() + self.varbinds_bulk_mapping = {} + self.varbinds_get_mapping = {} + + def process(self): + if self.type == "walk": + varbind_obj = Varbind(["SNMPv2-MIB"]) + self.varbinds_bulk.insert_varbind(varbind_obj) + self.divide_on_bulk_and_get() + if self.type != "walk": + self.varbinds_bulk_mapping = self.varbinds_bulk.get_profile_mapping( + self.name + ) + self.varbinds_get_mapping = self.varbinds_get.get_profile_mapping(self.name) + + def divide_on_bulk_and_get(self): + for varbind in sorted(self.varbinds, key=len): + varbind_obj = Varbind(varbind) + if len(varbind) < 3: + self.varbinds_bulk.insert_varbind(varbind_obj) + else: + if not self.varbinds_bulk.are_parents_in_map(varbind_obj.mapping_key()): + self.varbinds_get.insert_varbind(varbind_obj) + + def get_varbinds(self): + return self.varbinds_bulk, self.varbinds_get + + def get_mib_families(self): + return set( + self.varbinds_bulk.get_mib_families() + 
self.varbinds_get.get_mib_families() + ) + + def return_mapping_and_varbinds(self): + varbinds_get = [ + value.object_identity for value in self.varbinds_get.return_varbind_values() + ] + varbinds_bulk = [ + value.object_identity + for value in self.varbinds_bulk.return_varbind_values() + ] + return ( + varbinds_get, + self.varbinds_get_mapping, + varbinds_bulk, + self.varbinds_bulk_mapping, + ) + + def __add__(self, other): + new_instance = Profile(f"{self.name}:{other.name}", {}) + new_instance.varbinds_bulk = self.varbinds_bulk + other.varbinds_bulk + new_instance.varbinds_get = self.varbinds_get + other.varbinds_get + new_instance.varbinds_bulk_mapping = dict( + self.varbinds_bulk_mapping, **other.varbinds_bulk_mapping + ) + new_instance.varbinds_get_mapping = dict( + self.varbinds_get_mapping, **other.varbinds_get_mapping + ) + return new_instance + + def __repr__(self): + return f"Profile: {self.name}, varbinds_get: {self.varbinds_get}, varbinds_bulk: {self.varbinds_bulk}" + + +class ProfileCollection: + def __init__(self, list_of_profiles): + self.list_of_profiles_raw = list_of_profiles + self.list_of_profiles = {} + + def process_profiles(self): + for profile_name, profile_body in self.list_of_profiles_raw.items(): + current_profile = Profile(profile_name, profile_body) + current_profile.process() + self.list_of_profiles[profile_name] = current_profile + + def get_polling_info_from_profiles(self, profiles_names, walk=False) -> Profile: + profiles = [self.get_profile(name) for name in profiles_names] + if len(profiles) == 1 or walk: + return profiles[0] + return reduce(self.combine_profiles, profiles) + + def combine_profiles(self, first_profile, second_profile): + if isinstance(first_profile, Profile) and isinstance(second_profile, Profile): + return first_profile + second_profile + elif isinstance(first_profile, Profile): + return first_profile + elif isinstance(second_profile, Profile): + return second_profile + + def update(self, list_of_profiles): + if 
self.list_of_profiles_raw == list_of_profiles: + logger.info("No change in profiles") + else: + self.list_of_profiles_raw = list_of_profiles + self.process_profiles() + + def get_profile(self, profile_name): + is_profile_conditional = bool("__" in profile_name) + if profile_name in self.list_of_profiles: + profile = self.list_of_profiles.get(profile_name) + if ( + not profile.varbinds + and profile.type != "walk" + and not is_profile_conditional + ): + logger.warning( + f"VarBinds section not present inside the profile {profile_name}" + ) + return {} + return self.list_of_profiles.get(profile_name) + else: + if is_profile_conditional: + conditional_profile_name = profile_name.split("__")[0] + logger.warning( + f"Conditional profile {conditional_profile_name} initialization in progress..." + ) + else: + logger.warning( + f"There is either profile: {profile_name} missing from the configuration, or varBinds section not " + f"present inside the profile" + ) + return {} diff --git a/splunk_connect_for_snmp/splunk/tasks.py b/splunk_connect_for_snmp/splunk/tasks.py index 0311c7224..a71f247b3 100644 --- a/splunk_connect_for_snmp/splunk/tasks.py +++ b/splunk_connect_for_snmp/splunk/tasks.py @@ -197,8 +197,9 @@ def prepare(self, work): metric["fields"][f"metric_name:sc4snmp.{field}"] = valueAsBest( values["value"] ) - if METRICS_INDEXING_ENABLED and "index" in values: - metric["fields"]["mibIndex"] = values["index"] + if METRICS_INDEXING_ENABLED and "indexes" in data: + indexes_to_string = [str(index) for index in data["indexes"]] + metric["fields"]["mibIndex"] = ".".join(indexes_to_string) metrics.append(json.dumps(metric, indent=None)) else: event = { diff --git a/test/common/base_profiles/base.yaml b/test/common/base_profiles/base.yaml index 64bb4b194..28981b34a 100644 --- a/test/common/base_profiles/base.yaml +++ b/test/common/base_profiles/base.yaml @@ -6,7 +6,7 @@ BaseUpTime: - [ "IF-MIB", "ifName" ] - [ "IF-MIB", "ifAlias" ] - ["SNMPv2-MIB", "sysUpTime", 0] -EnirchIF: 
+EnrichIF: frequency: 600 condition: type: "base" diff --git a/test/common/base_profiles/runtime_config_enabled.yaml b/test/common/base_profiles/runtime_config_enabled.yaml index 112d2964f..80861870e 100644 --- a/test/common/base_profiles/runtime_config_enabled.yaml +++ b/test/common/base_profiles/runtime_config_enabled.yaml @@ -1,7 +1,7 @@ profiles: BaseUpTime: enabled: false - EnirchIF: + EnrichIF: frequency: 200 condition: type: "base" diff --git a/test/common/test_custom_translations.py b/test/common/test_custom_translations.py index 765fe0fd3..5cc55d501 100644 --- a/test/common/test_custom_translations.py +++ b/test/common/test_custom_translations.py @@ -8,9 +8,11 @@ ifInDiscards: myCustomName1 ifOutErrors: myCustomName2""" -mock_config_empty = """profiles: +mock_config_only_profiles = """profiles: profile1:""" +mock_config_empty = "" + class TestCustomTranslations(TestCase): @patch("builtins.open", new_callable=mock_open, read_data=mock_config) @@ -26,6 +28,11 @@ def test_load_custom_translations(self, m_open): result, ) + @patch("builtins.open", new_callable=mock_open, read_data=mock_config_only_profiles) + def test_load_custom_translations_only_profiles(self, m_open): + result = load_custom_translations() + self.assertIsNone(result) + @patch("builtins.open", new_callable=mock_open, read_data=mock_config_empty) def test_load_custom_translations_empty(self, m_open): result = load_custom_translations() diff --git a/test/common/test_inventory_processor.py b/test/common/test_inventory_processor.py index f9f8d2112..9b499e3ab 100644 --- a/test/common/test_inventory_processor.py +++ b/test/common/test_inventory_processor.py @@ -189,11 +189,11 @@ def test_get_group_hosts(self): inventory_processor.inventory_records, group_object_returned ) - def test_get_group_hosts_no_group_found(self): + def test_get_group_hosts_hostname(self): group_manager = Mock() logger = Mock() group_object = { - "address": "group1", + "address": "ec2-54-91-99-115.compute-1.amazonaws.com", 
"port": "", "version": "2c", "community": "public", @@ -206,10 +206,14 @@ def test_get_group_hosts_no_group_found(self): } inventory_processor = InventoryProcessor(group_manager, logger) group_manager.return_element.return_value = [] - inventory_processor.get_group_hosts(group_object, "group1") + inventory_processor.get_group_hosts( + group_object, "ec2-54-91-99-115.compute-1.amazonaws.com" + ) logger.warning.assert_called_with( - "Group group1 doesn't exist in the configuration. Skipping..." + "Group ec2-54-91-99-115.compute-1.amazonaws.com doesn't exist in the configuration. Treating ec2-54-91-99-115.compute-1.amazonaws.com as a hostname" ) + self.assertEqual(inventory_processor.single_hosts, [group_object]) + self.assertEqual(inventory_processor.inventory_records, []) def test_process_line_comment(self): logger = Mock() diff --git a/test/common/test_profiles.py b/test/common/test_profiles.py index 9d2e3ed67..bac781027 100644 --- a/test/common/test_profiles.py +++ b/test/common/test_profiles.py @@ -96,7 +96,7 @@ def test_read_base_profiles(self): ["SNMPv2-MIB", "sysUpTime", 0], ], }, - "EnirchIF": { + "EnrichIF": { "frequency": 600, "condition": {"type": "base"}, "varBinds": [ @@ -162,7 +162,7 @@ def test_all_profiles(self): ["SNMPv2-MIB", "sysUpTime", 0], ], }, - "EnirchIF": { + "EnrichIF": { "frequency": 600, "condition": {"type": "base"}, "varBinds": [ @@ -206,7 +206,7 @@ def test_all_profiles(self): ) def test_disabled_profiles(self): active_profiles = { - "EnirchIF": { + "EnrichIF": { "frequency": 200, "condition": {"type": "base"}, "varBinds": [ diff --git a/test/inventory/test_assign_profiles.py b/test/inventory/test_assign_profiles.py index 9f451759d..4b08347a8 100644 --- a/test/inventory/test_assign_profiles.py +++ b/test/inventory/test_assign_profiles.py @@ -1,3 +1,4 @@ +import os from unittest import TestCase, mock from splunk_connect_for_snmp.common.inventory_record import InventoryRecord @@ -57,7 +58,7 @@ def test_assignment_of_static_profiles(self, 
return_all_profiles): } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual({20: ["profile1"], 30: ["profile2"]}, result) def test_assignment_of_base_profiles(self, return_all_profiles): @@ -68,9 +69,21 @@ def test_assignment_of_base_profiles(self, return_all_profiles): "profile2": {"frequency": 30, "condition": {"type": "base"}}, } - result = assign_profiles(ir_smart, profiles, {}) + result, _ = assign_profiles(ir_smart, profiles, {}) self.assertEqual({60: ["BaseUpTime"], 30: ["profile2"]}, result) + @mock.patch("splunk_connect_for_snmp.inventory.tasks.POLL_BASE_PROFILES", False) + def test_assignment_of_base_profiles_polling_disabled(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + + profiles = { + "BaseUpTime": {"frequency": 60, "condition": {"type": "base"}}, + "profile2": {"frequency": 30, "condition": {"type": "base"}}, + } + + result, _ = assign_profiles(ir_smart, profiles, {}) + self.assertEqual({}, result) + def test_assignment_of_field_profiles(self, return_all_profiles): from splunk_connect_for_snmp.inventory.tasks import assign_profiles @@ -109,13 +122,13 @@ def test_assignment_of_field_profiles(self, return_all_profiles): } } - result = assign_profiles(ir_smart, profiles, target) + result, _ = assign_profiles(ir_smart, profiles, target) self.assertEqual({60: ["BaseUpTime", "MyProfile", "OtherProfile"]}, result) def test_assignment_of_field_profiles_missing_state(self, return_all_profiles): from splunk_connect_for_snmp.inventory.tasks import assign_profiles - result = assign_profiles(ir_smart, simple_profiles, {}) + result, _ = assign_profiles(ir_smart, simple_profiles, {}) self.assertEqual({}, result) def test_assignment_of_field_profiles_db_missing_field_value( @@ -125,7 +138,7 @@ def test_assignment_of_field_profiles_db_missing_field_value( target = {"state": {"SNMPv2-MIB|sysDescr": {}}} - result = assign_profiles(ir_smart, simple_profiles, 
target) + result, _ = assign_profiles(ir_smart, simple_profiles, target) self.assertEqual({}, result) def test_assignment_of_field_not_matching_regex(self, return_all_profiles): @@ -133,7 +146,7 @@ def test_assignment_of_field_not_matching_regex(self, return_all_profiles): target = {"state": {"SNMPv2-MIB|sysDescr": {"value": "WRONG"}}} - result = assign_profiles(ir_smart, simple_profiles, target) + result, _ = assign_profiles(ir_smart, simple_profiles, target) self.assertEqual({}, result) def test_assignment_of_static_and_smart_profiles(self, return_all_profiles): @@ -161,11 +174,72 @@ def test_assignment_of_static_and_smart_profiles(self, return_all_profiles): } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual( {60: ["BaseUpTime"], 30: ["profile5", "profile2"], 20: ["profile1"]}, result ) + def test_assignment_of_static_and_smart_profiles_and_conditional_profiles( + self, return_all_profiles + ): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + + profiles = { + "profile1": {"frequency": 20}, + "profile2": {"frequency": 30}, + "BaseUpTime": {"frequency": 60, "condition": {"type": "base"}}, + "profile5": {"frequency": 30, "condition": {"type": "base"}}, + "conditional_profile": { + "frequency": 120, + "conditions": [ + { + "operation": "equals", + "field": "IF-MIB.ifAdminStatus", + "value": "up", + } + ], + "varBinds": [["IF-MIB", "IfDescr"]], + }, + } + + ir = InventoryRecord( + **{ + "address": "192.168.0.1", + "port": "34", + "version": "2c", + "community": "public", + "secret": "secret", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "profile1;profile2;conditional_profile", + "SmartProfiles": True, + "delete": False, + } + ) + + result, computed_result = assign_profiles(ir, profiles, {}) + self.assertEqual( + {60: ["BaseUpTime"], 30: ["profile5", "profile2"], 20: ["profile1"]}, result + ) + self.assertEqual( + [ + { + "conditional_profile": { + 
"frequency": 120, + "conditions": [ + { + "operation": "equals", + "field": "IF-MIB.ifAdminStatus", + "value": "up", + } + ], + "varBinds": [["IF-MIB", "IfDescr"]], + } + } + ], + computed_result, + ) + def test_assignment_of_walk_profile_as_a_static_profile(self, return_all_profiles): from splunk_connect_for_snmp.inventory.tasks import assign_profiles @@ -191,7 +265,7 @@ def test_assignment_of_walk_profile_as_a_static_profile(self, return_all_profile } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual({30: ["profile5", "profile2"], 20: ["profile1"]}, result) def test_assignment_of_walk_profile_as_a_static_profile_without_frequency( @@ -221,7 +295,7 @@ def test_assignment_of_walk_profile_as_a_static_profile_without_frequency( } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual({30: ["profile5", "profile2"], 20: ["profile1"]}, result) def test_smart_profiles_as_static_ones(self, return_all_profiles): @@ -247,7 +321,7 @@ def test_smart_profiles_as_static_ones(self, return_all_profiles): } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual({30: ["profile5"], 20: ["profile1"]}, result) def test_smart_profiles_disabled_mandatory_profile(self, return_all_profiles): @@ -274,7 +348,7 @@ def test_smart_profiles_disabled_mandatory_profile(self, return_all_profiles): } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual( {30: ["profile5", "profile_mandatory"], 20: ["profile1"]}, result ) @@ -305,7 +379,7 @@ def test_smart_profiles_disabled_mandatory_profile_without_static_base_profile( } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual({30: ["profile_mandatory"], 20: ["profile1"]}, result) def test_assign_profiles_no_profiles(self, return_all_profiles): @@ -328,5 +402,29 @@ 
def test_assign_profiles_no_profiles(self, return_all_profiles): } ) - result = assign_profiles(ir, profiles, {}) + result, _ = assign_profiles(ir, profiles, {}) self.assertEqual({}, result) + + def test_add_profile_to_assigned_list(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import add_profile_to_assigned_list + + assigned_list = {} + add_profile_to_assigned_list(assigned_list, 30, "profile1") + add_profile_to_assigned_list(assigned_list, 30, "profile2") + add_profile_to_assigned_list(assigned_list, 10, "profile3") + self.assertEqual( + {30: ["profile1", "profile2"], 10: ["profile3"]}, assigned_list + ) + + def test_add_profile_to_assigned_list_with_something_already_inside( + self, return_all_profiles + ): + from splunk_connect_for_snmp.inventory.tasks import add_profile_to_assigned_list + + assigned_list = {30: ["profile5"]} + add_profile_to_assigned_list(assigned_list, 30, "profile1") + add_profile_to_assigned_list(assigned_list, 30, "profile2") + add_profile_to_assigned_list(assigned_list, 10, "profile3") + self.assertEqual( + {30: ["profile5", "profile1", "profile2"], 10: ["profile3"]}, assigned_list + ) diff --git a/test/inventory/test_conditional_profiles.py b/test/inventory/test_conditional_profiles.py new file mode 100644 index 000000000..b62e49a3b --- /dev/null +++ b/test/inventory/test_conditional_profiles.py @@ -0,0 +1,236 @@ +import unittest +from unittest import mock +from unittest.mock import MagicMock + + +@mock.patch( + "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" +) +class TestCreateQuery(unittest.TestCase): + def test_single_equals_condition(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_query + + conditions = [ + {"field": "MIB-FAMILY.field1", "value": "value1", "operation": "equals"} + ] + address = "127.0.0.1" + expected_query = { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": "^MIB-FAMILY"}}, + 
{"fields.MIB-FAMILY|field1.value": {"$eq": "value1"}}, + ] + } + self.assertEqual(create_query(conditions, address), expected_query) + + def test_single_lt_condition(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_query + + conditions = [{"field": "MIB-FAMILY.field2", "value": "10", "operation": "lt"}] + address = "127.0.0.1" + expected_query = { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": "^MIB-FAMILY"}}, + {"fields.MIB-FAMILY|field2.value": {"$lt": 10.0}}, + ] + } + self.assertEqual(create_query(conditions, address), expected_query) + + def test_single_gt_condition(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_query + + conditions = [{"field": "MIB-FAMILY.field3", "value": "20", "operation": "gt"}] + address = "127.0.0.1" + expected_query = { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": "^MIB-FAMILY"}}, + {"fields.MIB-FAMILY|field3.value": {"$gt": 20.0}}, + ] + } + self.assertEqual(create_query(conditions, address), expected_query) + + def test_single_in_condition(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_query + + conditions = [ + {"field": "MIB-FAMILY.field4", "value": [1, 2, 3], "operation": "in"} + ] + address = "127.0.0.1" + expected_query = { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": "^MIB-FAMILY"}}, + {"fields.MIB-FAMILY|field4.value": {"$in": [1.0, 2.0, 3.0]}}, + ] + } + self.assertEqual(create_query(conditions, address), expected_query) + + def test_multiple_conditions(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_query + + conditions = [ + {"field": "MIB-FAMILY.field1", "value": "value1", "operation": "equals"}, + {"field": "MIB-FAMILY.field2", "value": "10", "operation": "lt"}, + {"field": "MIB-FAMILY.field3", "value": "20", "operation": "gt"}, + {"field": "MIB-FAMILY.field4", "value": [1, 2, 0], "operation": 
"in"}, + ] + address = "127.0.0.1" + expected_query = { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": "^MIB-FAMILY"}}, + {"fields.MIB-FAMILY|field1.value": {"$eq": "value1"}}, + {"fields.MIB-FAMILY|field2.value": {"$lt": 10.0}}, + {"fields.MIB-FAMILY|field3.value": {"$gt": 20.0}}, + {"fields.MIB-FAMILY|field4.value": {"$in": [1.0, 2.0, 0.0]}}, + ] + } + self.assertDictEqual(create_query(conditions, address), expected_query) + + def test_in_conditions_with_many_types(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_query + + conditions = [ + {"field": "MIB-FAMILY.field4", "value": [1, "2", "up"], "operation": "in"}, + ] + address = "127.0.0.1" + expected_query = { + "$and": [ + {"address": address}, + {"group_key_hash": {"$regex": "^MIB-FAMILY"}}, + {"fields.MIB-FAMILY|field4.value": {"$in": [1.0, 2.0, "up"]}}, + ] + } + self.assertDictEqual(create_query(conditions, address), expected_query) + + def test_badly_formatted_lt_gt_condition(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import ( + BadlyFormattedFieldError, + create_query, + ) + + conditions = [ + {"field": "MIB-FAMILY.field4", "value": "up", "operation": "gt"}, + ] + address = "127.0.0.1" + with self.assertRaises(BadlyFormattedFieldError) as context: + create_query(conditions, address) + self.assertEqual("Value 'up' should be numeric", context.exception.args[0]) + + def test_badly_formatted_field(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import ( + BadlyFormattedFieldError, + create_query, + ) + + conditions = [ + {"field": "MIB-FAMILYfield4", "value": 5, "operation": "gt"}, + ] + address = "127.0.0.1" + with self.assertRaises(BadlyFormattedFieldError) as context: + create_query(conditions, address) + self.assertEqual( + "Field MIB-FAMILYfield4 is badly formatted", context.exception.args[0] + ) + + @unittest.mock.patch( + 
"splunk_connect_for_snmp.inventory.tasks.filter_condition_on_database" + ) + def test_generate_conditional_profile_with_varbinds( + self, filter_func, return_all_profiles + ): + from splunk_connect_for_snmp.inventory.tasks import generate_conditional_profile + + mongo_client = MagicMock() + filter_func.return_value = [ + { + "address": "54.91.99.113", + "group_key_hash": "IF-MIB::int=4", + "indexes": [4], + } + ] + profile_name = "test_profile" + conditional_profile_body = { + "frequency": 10, + "conditions": [ + {"field": "MIB-FAMILY.field4", "value": "up", "operation": "equals"}, + ], + "varBinds": [["MIB-FAMILY", "field"]], + } + address = "test_address" + expected = { + "test_profile": {"frequency": 10, "varBinds": [["MIB-FAMILY", "field", 4]]} + } + + result = generate_conditional_profile( + mongo_client, profile_name, conditional_profile_body, address + ) + + self.assertEqual(result, expected) + + def test_create_profile(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_profile + + initial_varbinds = [["IF-MIB", "ifDescr"], ["IF-MIB", "ifAlias"]] + filtered_records = [ + { + "address": "54.91.99.113", + "group_key_hash": "IF-MIB::int=4", + "indexes": [4], + }, + { + "address": "54.91.99.113", + "group_key_hash": "IF-MIB::int=5", + "indexes": [5], + }, + ] + frequency = 60 + result = create_profile( + "profile_name", frequency, initial_varbinds, filtered_records + ) + self.assertEqual( + { + "profile_name": { + "frequency": 60, + "varBinds": [ + ["IF-MIB", "ifDescr", 4], + ["IF-MIB", "ifAlias", 4], + ["IF-MIB", "ifDescr", 5], + ["IF-MIB", "ifAlias", 5], + ], + } + }, + result, + ) + + def test_create_profile_not_enough_varbinds(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import create_profile + + initial_varbinds = [["IF-MIB"]] + filtered_records = [ + { + "address": "54.91.99.113", + "group_key_hash": "IF-MIB::int=4", + "indexes": [4], + }, + { + "address": "54.91.99.113", + 
"group_key_hash": "IF-MIB::int=5", + "indexes": [5], + }, + ] + frequency = 60 + result = create_profile( + "profile_name", frequency, initial_varbinds, filtered_records + ) + self.assertEqual( + { + "profile_name": { + "frequency": 60, + "varBinds": [], + } + }, + result, + ) diff --git a/test/inventory/test_inventory_setup_poller.py b/test/inventory/test_inventory_setup_poller.py index 4d688daf4..0e3506781 100644 --- a/test/inventory/test_inventory_setup_poller.py +++ b/test/inventory/test_inventory_setup_poller.py @@ -56,7 +56,7 @@ def test_inventory_setup_poller( 60: ["BaseUpTime"], 30: ["profile5", "profile2"], 20: ["profile1"], - } + }, [] # when inventory_setup_poller(work) @@ -103,6 +103,135 @@ def test_inventory_setup_poller( ], ) + @patch( + "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" + ) + @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") + @mock.patch("pymongo.collection.Collection.find_one") + @mock.patch("pymongo.collection.Collection.replace_one") + @mock.patch("splunk_connect_for_snmp.inventory.tasks.assign_profiles") + @mock.patch("splunk_connect_for_snmp.inventory.tasks.get_inventory") + @mock.patch("splunk_connect_for_snmp.inventory.tasks.generate_conditional_profile") + def test_inventory_setup_poller_conditional_profile( + self, + generate_conditional_profile, + m_get_inventory, + m_assign_profiles, + m_replace_one, + m_find_one, + m_task_manager, + m_load_profiles, + ): + from splunk_connect_for_snmp.inventory.tasks import inventory_setup_poller + + periodic_obj_mock = Mock() + m_load_profiles.return_value = [] + m_task_manager.return_value = periodic_obj_mock + m_get_inventory.return_value = InventoryRecord( + **{ + "address": "192.168.0.1", + "port": "34", + "version": "2c", + "community": "public", + "secret": "secret", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "conditional_profile", + "SmartProfiles": True, + "delete": False, + } + ) + 
generate_conditional_profile.return_value = { + "conditional_profile:192.168.0.1": { + "frequency": 120, + "varBinds": [["IF-MIB", "IfDescr", 1]], + } + } + m_find_one.return_value = { + "state": { + "SNMPv2-MIB|sysDescr": {"value": "MIKROTIK"}, + "SNMPv2-MIB|sysName": {"value": "Linux Debian 2.0.1"}, + "SNMPv2-MIB|sysContact": {"value": "non-existing-name@splunk"}, + } + } + + work = {"address": "192.168.0.1"} + + m_assign_profiles.return_value = { + 60: ["BaseUpTime"], + 30: ["profile5", "profile2"], + 20: ["profile1"], + }, [ + { + "conditional_profile": { + "frequency": 120, + "conditions": [ + { + "operation": "equals", + "field": "IF-MIB.ifAdminStatus", + "value": "up", + } + ], + "varBinds": [["IF-MIB", "IfDescr"]], + } + } + ] + + # when + inventory_setup_poller(work) + + calls = periodic_obj_mock.manage_task.call_args_list + + calls[0][1]["kwargs"]["profiles"] = set(calls[0][1]["kwargs"]["profiles"]) + calls[1][1]["kwargs"]["profiles"] = set(calls[1][1]["kwargs"]["profiles"]) + calls[2][1]["kwargs"]["profiles"] = set(calls[2][1]["kwargs"]["profiles"]) + calls[3][1]["kwargs"]["profiles"] = set(calls[3][1]["kwargs"]["profiles"]) + self.assertEqual( + { + "address": "192.168.0.1", + "profiles": {"BaseUpTime"}, + "priority": 2, + "frequency": 60, + }, + calls[0][1]["kwargs"], + ) + self.assertEqual( + { + "address": "192.168.0.1", + "priority": 2, + "profiles": {"profile2", "profile5"}, + "frequency": 30, + }, + calls[1][1]["kwargs"], + ) + self.assertEqual( + { + "address": "192.168.0.1", + "profiles": {"profile1"}, + "priority": 2, + "frequency": 20, + }, + calls[2][1]["kwargs"], + ) + self.assertEqual( + { + "address": "192.168.0.1", + "profiles": {"conditional_profile__192|168|0|1"}, + "priority": 2, + "frequency": 120, + }, + calls[3][1]["kwargs"], + ) + periodic_obj_mock.delete_unused_poll_tasks.assert_called_with( + "192.168.0.1", + [ + "sc4snmp;192.168.0.1;60;poll", + "sc4snmp;192.168.0.1;30;poll", + "sc4snmp;192.168.0.1;20;poll", + 
"sc4snmp;192.168.0.1;120;poll", + ], + ) + @patch( "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" ) diff --git a/test/snmp/test_do_work.py b/test/snmp/test_do_work.py index 0e4b96e8f..c7dfe5e44 100644 --- a/test/snmp/test_do_work.py +++ b/test/snmp/test_do_work.py @@ -4,6 +4,7 @@ from splunk_connect_for_snmp.common.inventory_record import InventoryRecord from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError from splunk_connect_for_snmp.snmp.manager import Poller +from splunk_connect_for_snmp.snmp.varbinds_resolver import ProfileCollection inventory_record = InventoryRecord( **{ @@ -34,7 +35,9 @@ def test_do_work_no_work_to_do(self): poller.last_modified = 1609675634 poller.snmpEngine = None poller.profiles_manager = MagicMock() - + poller.profiles_collection = MagicMock() + poller.profiles_collection.process_profiles = MagicMock() + poller.already_loaded_mibs = {} varbinds_bulk, varbinds_get = set(), set() get_mapping, bulk_mapping = {}, {} @@ -68,13 +71,16 @@ def test_do_work_bulk(self, load_profiles, getCmd, bulkCmd): m_process_data.return_value = (False, [], {}) poller.process_snmp_data = m_process_data requested_profiles = ["profile1", "profile2"] - poller.profiles_manager.return_collection.return_value = { + poller.profiles = { "profile1": { "frequency": 20, "varBinds": [["IF-MIB", "ifDescr"], ["IF-MIB", "ifSpeed"]], }, "profile2": {"frequency": 20, "varBinds": [["UDP-MIB", "udpOutDatagrams"]]}, } + poller.already_loaded_mibs = {} + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() bulkCmd.return_value = [(None, 0, 0, "Oid1"), (None, 0, 0, "Oid2")] poller.do_work(inventory_record, profiles=requested_profiles) self.assertEqual(poller.process_snmp_data.call_count, 2) @@ -101,7 +107,7 @@ def test_do_work_get(self, load_profiles, getCmd, bulkCmd): poller.process_snmp_data = MagicMock() poller.profiles_manager = MagicMock() requested_profiles = 
["profile1", "profile2"] - poller.profiles_manager.return_collection.return_value = { + poller.profiles = { "profile1": { "frequency": 20, "varBinds": [["IF-MIB", "ifDescr", 1], ["IF-MIB", "ifSpeed", 2]], @@ -111,6 +117,9 @@ def test_do_work_get(self, load_profiles, getCmd, bulkCmd): "varBinds": [["UDP-MIB", "udpOutDatagrams", 1]], }, } + poller.already_loaded_mibs = {} + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() getCmd.return_value = [ (None, 0, 0, "Oid1"), (None, 0, 0, "Oid2"), @@ -141,9 +150,12 @@ def test_do_work_errors(self, load_profiles, getCmd, bulkCmd): poller.process_snmp_data = MagicMock() poller.profiles_manager = MagicMock() requested_profiles = ["profile1"] - poller.profiles_manager.return_collection.return_value = { + poller.profiles = { "profile1": {"frequency": 20, "varBinds": [["IF-MIB", "ifDescr", 1]]} } + poller.already_loaded_mibs = {} + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() getCmd.return_value = [(True, True, 2, [])] with self.assertRaises(SnmpActionError): poller.do_work(inventory_record, profiles=requested_profiles) diff --git a/test/snmp/test_get_varbinds.py b/test/snmp/test_get_varbinds.py index acd2e7740..034ab9d2a 100644 --- a/test/snmp/test_get_varbinds.py +++ b/test/snmp/test_get_varbinds.py @@ -2,11 +2,15 @@ from unittest.mock import Mock from splunk_connect_for_snmp.snmp.manager import Poller +from splunk_connect_for_snmp.snmp.varbinds_resolver import ProfileCollection class TestGetVarbinds(TestCase): def test_get_varbinds_for_walk(self): poller = Poller.__new__(Poller) + poller.profiles_collection = ProfileCollection({}) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( "192.168.0.1", walk=True ) @@ -38,11 +42,14 @@ def test_get_varbinds_for_walk_redundant(self): } 
poller.profiles = profiles + poller.profiles_collection = ProfileCollection(profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = {} poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( "192.168.0.1", walk=True, profiles=["test1"] ) - self.assertEqual(0, len(varbinds_get)) + self.assertEqual(1, len(varbinds_get)) self.assertEqual(3, len(varbinds_bulk)) self.assertEqual(0, len(get_mapping)) self.assertEqual(0, len(bulk_mapping)) @@ -69,12 +76,15 @@ def test_get_varbinds_for_walk_none(self): } poller.profiles = profiles + poller.profiles_collection = ProfileCollection(profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = {} poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( "192.168.0.1", walk=True, profiles=["test1"] ) self.assertEqual(0, len(varbinds_get)) - self.assertEqual(2, len(varbinds_bulk)) + self.assertEqual(1, len(varbinds_bulk)) self.assertEqual(0, len(get_mapping)) self.assertEqual(0, len(bulk_mapping)) @@ -83,9 +93,8 @@ def test_get_varbinds_for_walk_none(self): self.assertEqual( { walk_var_bind[0]._ObjectType__args[0]._ObjectIdentity__args[0], - walk_var_bind[1]._ObjectType__args[0]._ObjectIdentity__args[0], }, - {"SNMPv2-MIB", "IF-MIB"}, + {"SNMPv2-MIB"}, ) def test_get_varbinds_for_walk_with_three_profiles(self): @@ -99,12 +108,15 @@ def test_get_varbinds_for_walk_with_three_profiles(self): } poller.profiles = profiles + poller.profiles_collection = ProfileCollection(profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = {} poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( "192.168.0.1", walk=True, profiles=["test1"] ) self.assertEqual(0, len(varbinds_get)) - self.assertEqual(5, len(varbinds_bulk)) + self.assertEqual(4, len(varbinds_bulk)) self.assertEqual(0, len(get_mapping)) 
self.assertEqual(0, len(bulk_mapping)) @@ -116,14 +128,13 @@ def test_get_varbinds_for_walk_with_three_profiles(self): walk_var_bind[1]._ObjectType__args[0]._ObjectIdentity__args[0], walk_var_bind[2]._ObjectType__args[0]._ObjectIdentity__args[0], walk_var_bind[3]._ObjectType__args[0]._ObjectIdentity__args[0], - walk_var_bind[4]._ObjectType__args[0]._ObjectIdentity__args[0], }, - {"SNMPv2-MIB", "IF-MIB", "UDP-MIB", "IP-MIB", "TCP-MIB"}, + {"SNMPv2-MIB", "UDP-MIB", "IP-MIB", "TCP-MIB"}, ) def test_get_varbinds_for_walk_next_time_no_profiles(self): poller = Poller.__new__(Poller) - + poller.profiles_collection = ProfileCollection({}) varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( "192.168.0.1", walk=True, profiles=[] ) @@ -141,12 +152,23 @@ def test_get_varbinds_for_walk_next_time_no_profiles(self): def test_get_varbinds_for_walk_with_profiles(self): profiles = { - "profile1": {"frequency": 20, "varBinds": [["IF-MIB"]]}, - "profile2": {"frequency": 20, "varBinds": [["UDP-MIB"]]}, + "profile1": { + "condition": {"type": "walk"}, + "frequency": 20, + "varBinds": [["IF-MIB"]], + }, + "profile2": { + "condition": {"type": "walk"}, + "frequency": 20, + "varBinds": [["UDP-MIB"]], + }, } poller = Poller.__new__(Poller) poller.profiles = profiles + poller.profiles_collection = ProfileCollection(profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( @@ -172,12 +194,23 @@ def test_get_varbinds_for_walk_with_profiles(self): def test_get_varbinds_for_walk_with_profiles_changed_sequence(self): profiles = { - "profile1": {"frequency": 20, "varBinds": [["IF-MIB"]]}, - "profile2": {"frequency": 20, "varBinds": [["UDP-MIB"]]}, + "profile1": { + "condition": {"type": "walk"}, + "frequency": 20, + "varBinds": [["IF-MIB"]], + }, + "profile2": { + "condition": {"type": "walk"}, + "frequency": 20, + "varBinds": 
[["UDP-MIB"]], + }, } poller = Poller.__new__(Poller) poller.profiles = profiles + poller.profiles_collection = ProfileCollection(profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( @@ -185,7 +218,7 @@ def test_get_varbinds_for_walk_with_profiles_changed_sequence(self): ) self.assertEqual(0, len(varbinds_get)) - self.assertEqual(3, len(varbinds_bulk)) + self.assertEqual(2, len(varbinds_bulk)) self.assertEqual(0, len(get_mapping)) self.assertEqual(0, len(bulk_mapping)) @@ -198,9 +231,8 @@ def test_get_varbinds_for_walk_with_profiles_changed_sequence(self): ) ) - self.assertEqual("IF-MIB", names[0]) - self.assertEqual("SNMPv2-MIB", names[1]) - self.assertEqual("UDP-MIB", names[2]) + self.assertEqual("SNMPv2-MIB", names[0]) + self.assertEqual("UDP-MIB", names[1]) def test_get_varbinds_for_poll_family_only(self): poller = Poller.__new__(Poller) @@ -209,6 +241,9 @@ def test_get_varbinds_for_poll_family_only(self): "profile1": {"frequency": 20, "varBinds": [["IF-MIB"]]}, "profile2": {"frequency": 20, "varBinds": [["UDP-MIB"]]}, } + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() poller.load_mibs = Mock() profiles_requested = ["profile1", "profile2"] @@ -233,7 +268,8 @@ def test_get_varbinds_for_poll_family_only(self): self.assertEqual("IF-MIB", names[0]) self.assertEqual("UDP-MIB", names[1]) self.assertEqual({"IF-MIB": "profile1", "UDP-MIB": "profile2"}, bulk_mapping) - poller.load_mibs.assert_called_with(["IF-MIB", "UDP-MIB"]) + poller.load_mibs.assert_called() + self.assertCountEqual(poller.load_mibs.call_args.args[0], ["IF-MIB", "UDP-MIB"]) def test_get_varbinds_for_poll_only_bulk_properties(self): poller = Poller.__new__(Poller) @@ -246,6 +282,9 @@ def test_get_varbinds_for_poll_only_bulk_properties(self): "profile2": 
{"frequency": 20, "varBinds": [["UDP-MIB", "udpOutDatagrams"]]}, } poller.load_mibs = Mock() + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() profiles_requested = ["profile1", "profile2"] varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( @@ -275,13 +314,14 @@ def test_get_varbinds_for_poll_only_bulk_properties(self): self.assertEqual( { - "IF-MIB:ifDescr": "profile1", - "IF-MIB:ifSpeed": "profile1", - "UDP-MIB:udpOutDatagrams": "profile2", + "IF-MIB::ifDescr": "profile1", + "IF-MIB::ifSpeed": "profile1", + "UDP-MIB::udpOutDatagrams": "profile2", }, bulk_mapping, ) - poller.load_mibs.assert_called_with(["IF-MIB", "UDP-MIB"]) + poller.load_mibs.assert_called() + self.assertCountEqual(poller.load_mibs.call_args.args[0], ["IF-MIB", "UDP-MIB"]) def test_get_varbinds_for_poll_only_get_properties(self): poller = Poller.__new__(Poller) @@ -297,6 +337,9 @@ def test_get_varbinds_for_poll_only_get_properties(self): }, } poller.load_mibs = Mock() + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() profiles_requested = ["profile1", "profile2"] varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( @@ -327,13 +370,73 @@ def test_get_varbinds_for_poll_only_get_properties(self): self.assertEqual( { - "IF-MIB:ifDescr:0": "profile1", - "IF-MIB:ifDescr:1": "profile1", - "UDP-MIB:udpOutDatagrams:1": "profile2", + "IF-MIB::ifDescr.0": "profile1", + "IF-MIB::ifDescr.1": "profile1", + "UDP-MIB::udpOutDatagrams.1": "profile2", }, get_mapping, ) - poller.load_mibs.assert_called_with(["IF-MIB", "UDP-MIB"]) + poller.load_mibs.assert_called() + self.assertCountEqual(poller.load_mibs.call_args.args[0], ["IF-MIB", "UDP-MIB"]) + + def test_get_varbinds_for_poll_only_get_properties_compound(self): + poller = Poller.__new__(Poller) + + 
poller.profiles = { + "profile1": { + "frequency": 20, + "varBinds": [["IF-MIB", "ifDescr", 0], ["IF-MIB", "ifDescr", 1]], + }, + "profile2": { + "frequency": 20, + "varBinds": [["TCP-MIB", "tcpListenerProcess", 0, 443]], + }, + } + poller.load_mibs = Mock() + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() + profiles_requested = ["profile1", "profile2"] + + varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( + "192.168.0.1", profiles=profiles_requested + ) + + self.assertEqual(3, len(varbinds_get)) + self.assertEqual(0, len(varbinds_bulk)) + self.assertEqual(3, len(get_mapping)) + self.assertEqual(0, len(bulk_mapping)) + + names = sorted( + list( + map( + lambda x: ( + x._ObjectType__args[0]._ObjectIdentity__args[0], + x._ObjectType__args[0]._ObjectIdentity__args[1], + x._ObjectType__args[0]._ObjectIdentity__args[2], + ), + varbinds_get, + ) + ) + ) + + self.assertEqual(("IF-MIB", "ifDescr", 0), names[0]) + self.assertEqual(("IF-MIB", "ifDescr", 1), names[1]) + self.assertEqual( + ("TCP-MIB", "tcpListenerProcess", 0, 443), + varbinds_get[2]._ObjectType__args[0]._ObjectIdentity__args, + ) + + self.assertEqual( + { + "IF-MIB::ifDescr.0": "profile1", + "IF-MIB::ifDescr.1": "profile1", + "TCP-MIB::tcpListenerProcess.0.443": "profile2", + }, + get_mapping, + ) + poller.load_mibs.assert_called() + self.assertCountEqual(poller.load_mibs.call_args.args[0], ["IF-MIB", "TCP-MIB"]) def test_get_varbinds_for_poll_shadowed_by_family(self): poller = Poller.__new__(Poller) @@ -357,6 +460,9 @@ def test_get_varbinds_for_poll_shadowed_by_family(self): }, } poller.load_mibs = Mock() + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() profiles_requested = ["profile1", "profile2"] varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( @@ 
-381,7 +487,8 @@ def test_get_varbinds_for_poll_shadowed_by_family(self): self.assertEqual("UDP-MIB", names[1]) self.assertEqual({"IF-MIB": "profile2", "UDP-MIB": "profile1"}, bulk_mapping) - poller.load_mibs.assert_called_with(["UDP-MIB", "IF-MIB"]) + poller.load_mibs.assert_called() + self.assertCountEqual(poller.load_mibs.call_args.args[0], ["IF-MIB", "UDP-MIB"]) def test_get_varbinds_for_poll_shadowed_by_bulk_name(self): poller = Poller.__new__(Poller) @@ -396,6 +503,9 @@ def test_get_varbinds_for_poll_shadowed_by_bulk_name(self): } } poller.load_mibs = Mock() + poller.profiles_collection = ProfileCollection(poller.profiles) + poller.profiles_collection.process_profiles() + poller.already_loaded_mibs = set() profiles_requested = ["profile1"] varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_var_binds( @@ -421,6 +531,25 @@ def test_get_varbinds_for_poll_shadowed_by_bulk_name(self): self.assertEqual(("UDP-MIB", "udpOutDatagrams"), names[0]) - self.assertEqual({"UDP-MIB:udpOutDatagrams": "profile1"}, bulk_mapping) + self.assertEqual({"UDP-MIB::udpOutDatagrams": "profile1"}, bulk_mapping) poller.load_mibs.assert_called_with(["UDP-MIB"]) + + def test_get_varbind_chunk(self): + poller = Poller.__new__(Poller) + + initial_list = list(range(1, 13)) + expected_result = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + expected_result_4 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + expected_result_5 = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12]] + + self.assertEqual( + list(poller.get_varbind_chunk(initial_list, 3)), expected_result + ) + self.assertEqual( + list(poller.get_varbind_chunk(initial_list, 4)), expected_result_4 + ) + self.assertEqual( + list(poller.get_varbind_chunk(initial_list, 5)), expected_result_5 + ) + self.assertEqual([], []) diff --git a/test/snmp/test_process_snmp_data.py b/test/snmp/test_process_snmp_data.py index 9f264227b..bf3e5cd2f 100644 --- a/test/snmp/test_process_snmp_data.py +++ b/test/snmp/test_process_snmp_data.py 
@@ -9,12 +9,12 @@ class TestProcessSnmpData(TestCase): @patch("splunk_connect_for_snmp.snmp.manager.get_group_key") @patch("splunk_connect_for_snmp.snmp.manager.map_metric_type") @patch("splunk_connect_for_snmp.snmp.manager.extract_index_number") - @patch("splunk_connect_for_snmp.snmp.manager.extract_index_oid_part") + @patch("splunk_connect_for_snmp.snmp.manager.extract_indexes") @patch("time.time") def test_multiple_metrics_single_group( self, m_time, - m_extract_index_oid_part, + m_extract_indexes, m_extract_index_number, m_map_metric_type, m_get_group_key, @@ -26,7 +26,7 @@ def test_multiple_metrics_single_group( m_get_group_key.return_value = "QWERTYUIOP" m_map_metric_type.side_effect = ["g", "g"] m_extract_index_number.return_value = 1 - m_extract_index_oid_part.side_effect = ["7", "7.6"] + m_extract_indexes.return_value = [7] m_time.return_value = 1640609779.473053 @@ -59,6 +59,7 @@ def test_multiple_metrics_single_group( self.assertEqual( { "QWERTYUIOP": { + "indexes": [7], "fields": {}, "metrics": { "IF-MIB.some_metric": { @@ -66,14 +67,12 @@ def test_multiple_metrics_single_group( "time": 1640609779.473053, "type": "g", "value": 65.0, - "index": "7", }, "UDP-MIB.next_metric": { "oid": "9.8.7.6", "time": 1640609779.473053, "type": "g", "value": 123.0, - "index": "7.6", }, }, } @@ -85,12 +84,12 @@ def test_multiple_metrics_single_group( @patch("splunk_connect_for_snmp.snmp.manager.get_group_key") @patch("splunk_connect_for_snmp.snmp.manager.map_metric_type") @patch("splunk_connect_for_snmp.snmp.manager.extract_index_number") - @patch("splunk_connect_for_snmp.snmp.manager.extract_index_oid_part") + @patch("splunk_connect_for_snmp.snmp.manager.extract_indexes") @patch("time.time") def test_multiple_metrics_multiple_groups( self, m_time, - m_extract_index_oid_part, + m_extract_indexes, m_extract_index_number, m_map_metric_type, m_get_group_key, @@ -102,7 +101,7 @@ def test_multiple_metrics_multiple_groups( m_get_group_key.side_effect = ["GROUP1", "GROUP2"] 
m_map_metric_type.side_effect = ["g", "g"] m_extract_index_number.return_value = 1 - m_extract_index_oid_part.side_effect = ["7", "6"] + m_extract_indexes.return_value = [7] m_time.return_value = 1640609779.473053 @@ -135,6 +134,7 @@ def test_multiple_metrics_multiple_groups( self.assertEqual( { "GROUP1": { + "indexes": [7], "fields": {}, "metrics": { "IF-MIB.some_metric": { @@ -142,11 +142,11 @@ def test_multiple_metrics_multiple_groups( "time": 1640609779.473053, "type": "g", "value": 65.0, - "index": "7", } }, }, "GROUP2": { + "indexes": [7], "fields": {}, "metrics": { "UDP-MIB.next_metric": { @@ -154,7 +154,6 @@ def test_multiple_metrics_multiple_groups( "time": 1640609779.473053, "type": "g", "value": 123.0, - "index": "6", } }, }, @@ -166,12 +165,12 @@ def test_multiple_metrics_multiple_groups( @patch("splunk_connect_for_snmp.snmp.manager.get_group_key") @patch("splunk_connect_for_snmp.snmp.manager.map_metric_type") @patch("splunk_connect_for_snmp.snmp.manager.extract_index_number") - @patch("splunk_connect_for_snmp.snmp.manager.extract_index_oid_part") + @patch("splunk_connect_for_snmp.snmp.manager.extract_indexes") @patch("time.time") def test_metrics_and_fields( self, m_time, - m_extract_index_oid_part, + m_extract_indexes, m_extract_index_number, m_map_metric_type, m_get_group_key, @@ -183,7 +182,7 @@ def test_metrics_and_fields( m_get_group_key.return_value = "GROUP1" m_map_metric_type.side_effect = ["g", "r"] m_extract_index_number.return_value = 1 - m_extract_index_oid_part.return_value = "6" + m_extract_indexes.return_value = [7] m_time.return_value = 1640609779.473053 @@ -216,6 +215,7 @@ def test_metrics_and_fields( self.assertEqual( { "GROUP1": { + "indexes": [7], "fields": { "UDP-MIB.some_field": { "oid": "9.8.7.6", @@ -230,7 +230,6 @@ def test_metrics_and_fields( "time": 1640609779.473053, "type": "g", "value": 65.0, - "index": "6", } }, } @@ -242,12 +241,12 @@ def test_metrics_and_fields( 
@patch("splunk_connect_for_snmp.snmp.manager.get_group_key") @patch("splunk_connect_for_snmp.snmp.manager.map_metric_type") @patch("splunk_connect_for_snmp.snmp.manager.extract_index_number") - @patch("splunk_connect_for_snmp.snmp.manager.extract_index_oid_part") + @patch("splunk_connect_for_snmp.snmp.manager.extract_indexes") @patch("time.time") def test_metrics_with_profile( self, m_time, - m_extract_index_oid_part, + m_extract_indexes, m_extract_index_number, m_map_metric_type, m_get_group_key, @@ -259,7 +258,7 @@ def test_metrics_with_profile( m_get_group_key.return_value = "QWERTYUIOP" m_map_metric_type.side_effect = ["g", "g"] m_extract_index_number.return_value = 1 - m_extract_index_oid_part.side_effect = ["6.7", "6"] + m_extract_indexes.return_value = [6, 7] m_time.return_value = 1640609779.473053 @@ -285,13 +284,17 @@ def test_metrics_with_profile( (var_bind_mock2_1, var_bind_mock2_2), ] metrics = {} - mapping = {"IF-MIB:some_metric": "profile1", "UDP-MIB:next_metric": "profile2"} + mapping = { + "IF-MIB::some_metric": "profile1", + "UDP-MIB::next_metric": "profile2", + } poller.process_snmp_data(varBindTable, metrics, "some_target", mapping) self.assertEqual( { "QWERTYUIOP": { + "indexes": [6, 7], "fields": {}, "metrics": { "IF-MIB.some_metric": { @@ -299,14 +302,12 @@ def test_metrics_with_profile( "time": 1640609779.473053, "type": "g", "value": 65.0, - "index": "6.7", }, "UDP-MIB.next_metric": { "oid": "9.8.7.6", "time": 1640609779.473053, "type": "g", "value": 123.0, - "index": "6", }, }, "profiles": ["profile1", "profile2"], diff --git a/test/snmp/test_utils.py b/test/snmp/test_utils.py index 29b64dceb..9308bb029 100644 --- a/test/snmp/test_utils.py +++ b/test/snmp/test_utils.py @@ -1,5 +1,5 @@ from unittest import TestCase, mock -from unittest.mock import Mock +from unittest.mock import MagicMock, Mock from pysnmp.proto.rfc1902 import ObjectName @@ -8,7 +8,7 @@ from splunk_connect_for_snmp.snmp.manager import ( _any_failure_happened, 
extract_index_number, - extract_index_oid_part, + extract_indexes, fill_empty_value, get_inventory, is_increasing_oids_ignored, @@ -175,26 +175,18 @@ def test_is_increasing_oids_ignored_empty(self): self.assertFalse(is_increasing_oids_ignored("127.0.0.2", "161")) self.assertFalse(is_increasing_oids_ignored("127.0.0.1", "162")) - def test_extract_index_oid_part(self): - object_identity, mib_node, object_instance_id = Mock(), Mock(), Mock() - mib_node.getName.return_value = ObjectName( - (1, 3, 6, 1, 4, 1, 9, 9, 109, 1, 1, 1, 1, 2) - ) - object_instance_id = ObjectName("1.3.6.1.4.1.9.9.109.1.1.1.1.2.7") - object_identity.getOid.return_value = object_instance_id - object_identity.getMibNode.return_value = mib_node - varBind = (object_identity, None) - result = extract_index_oid_part(varBind) - self.assertEqual("7", result) - - def test_extract_index_oid_part_complex_index(self): - object_identity, mib_node, object_instance_id = Mock(), Mock(), Mock() - mib_node.getName.return_value = ObjectName( - (1, 3, 6, 1, 4, 1, 9, 9, 109, 1, 2, 2, 1, 1) - ) - object_instance_id = ObjectName("1.3.6.1.4.1.9.9.109.1.2.2.1.1.7.12147") - object_identity.getOid.return_value = object_instance_id - object_identity.getMibNode.return_value = mib_node - varBind = (object_identity, None) - result = extract_index_oid_part(varBind) - self.assertEqual("7.12147", result) + def test_extract_indexes_one_element(self): + pysnmp_index_object = ObjectName() + pysnmp_index_object._value = MagicMock() + pysnmp_index_object._value = tuple([0]) + index = tuple([pysnmp_index_object]) + result = extract_indexes(index) + self.assertEqual([0], result) + + def test_extract_indexes_multiple_elements(self): + pysnmp_index_object = ObjectName() + pysnmp_index_object._value = MagicMock() + pysnmp_index_object._value = b"\xac\x1f\x1b\x90" + index = tuple([pysnmp_index_object]) + result = extract_indexes(index) + self.assertEqual(["172.31.27.144"], result) diff --git a/test/snmp/test_varbinds_resolver.py 
b/test/snmp/test_varbinds_resolver.py new file mode 100644 index 000000000..7a57888b8 --- /dev/null +++ b/test/snmp/test_varbinds_resolver.py @@ -0,0 +1,187 @@ +import unittest +from unittest.mock import MagicMock + +from pysnmp.smi.rfc1902 import ObjectType + +from splunk_connect_for_snmp.snmp.varbinds_resolver import ( + Profile, + Varbind, + VarBindContainer, +) + + +class TestVarbind(unittest.TestCase): + def test_init_with_list(self): + varbind = Varbind(["SNMPv2-MIB", "sysDescr", "0"]) + self.assertEqual(varbind.list, ["SNMPv2-MIB", "sysDescr", "0"]) + self.assertIsInstance(varbind.object_identity, ObjectType) + + def test_init_with_string(self): + varbind = Varbind("SNMPv2-MIB") + self.assertEqual(varbind.list, ["SNMPv2-MIB"]) + self.assertIsInstance(varbind.object_identity, ObjectType) + + def test_mapping_key(self): + varbind = Varbind(["SNMPv2-MIB", "sysDescr", "0"]) + self.assertEqual(varbind.mapping_key(), "SNMPv2-MIB::sysDescr.0") + + def test_mapping_key_multiple(self): + varbind = Varbind(["TCP-MIB", "tcpListenerProcess", 0, 443]) + self.assertEqual(varbind.mapping_key(), "TCP-MIB::tcpListenerProcess.0.443") + + def test_repr(self): + varbind = Varbind(["SNMPv2-MIB", "sysDescr", "0"]) + self.assertEqual(repr(varbind), "['SNMPv2-MIB', 'sysDescr', '0']") + + +class TestVarBindContainer(unittest.TestCase): + def setUp(self): + self.varbind_container = VarBindContainer() + + def test_add_varbind(self): + varbind1 = Varbind(["IF-MIB", "ifInOctets", 1]) + self.varbind_container.insert_varbind(varbind1) + self.assertIn(varbind1.mapping_key(), self.varbind_container.map) + + varbind2 = Varbind(["IP-MIB", "ipInReceives"]) + self.varbind_container.insert_varbind(varbind2) + self.assertIn(varbind2.mapping_key(), self.varbind_container.map) + + varbind3 = Varbind(["IP-MIB", "ipInReceives", 1]) + self.varbind_container.insert_varbind(varbind3) + self.assertNotIn(varbind3.mapping_key(), self.varbind_container.map) + + varbind4 = Varbind(["IF-MIB", "ifOutOctets"]) 
+ self.varbind_container.insert_varbind(varbind4) + self.assertIn(varbind4.mapping_key(), self.varbind_container.map) + + def test_return_varbind_keys(self): + varbind1 = Varbind(["IF-MIB", "ifInOctets", 1]) + self.varbind_container.insert_varbind(varbind1) + varbind2 = Varbind(["IP-MIB", "ipInReceives"]) + self.varbind_container.insert_varbind(varbind2) + + varbind_keys = self.varbind_container.return_varbind_keys() + self.assertIn(varbind1.mapping_key(), varbind_keys) + self.assertIn(varbind2.mapping_key(), varbind_keys) + self.assertCountEqual( + varbind_keys, ["IF-MIB::ifInOctets.1", "IP-MIB::ipInReceives"] + ) + + def test_return_varbind_values(self): + varbind1 = Varbind(["IF-MIB", "ifInOctets", 1]) + self.varbind_container.insert_varbind(varbind1) + varbind2 = Varbind(["IP-MIB", "ipInReceives"]) + self.varbind_container.insert_varbind(varbind2) + + varbind_values = self.varbind_container.return_varbind_values() + self.assertIn(varbind1, varbind_values) + self.assertIn(varbind2, varbind_values) + + def test_get_mib_families(self): + varbind1 = Varbind(["IF-MIB", "ifInOctets", 1]) + self.varbind_container.insert_varbind(varbind1) + varbind2 = Varbind(["IP-MIB", "ipInReceives"]) + self.varbind_container.insert_varbind(varbind2) + + mib_families = self.varbind_container.get_mib_families() + self.assertIn("IF-MIB", mib_families) + self.assertIn("IP-MIB", mib_families) + + def test_get_profile_mapping(self): + profile_name = "profile1" + varbind1 = Varbind(["IF-MIB", "ifInOctets", 1]) + self.varbind_container.insert_varbind(varbind1) + varbind2 = Varbind(["IP-MIB", "ipInReceives"]) + self.varbind_container.insert_varbind(varbind2) + + profile_mapping = self.varbind_container.get_profile_mapping(profile_name) + self.assertIn(varbind1.mapping_key(), profile_mapping) + self.assertIn(varbind2.mapping_key(), profile_mapping) + self.assertEqual(len(profile_mapping), 2) + self.assertEqual( + {"IF-MIB::ifInOctets.1": "profile1", "IP-MIB::ipInReceives": "profile1"}, + 
profile_mapping, + ) + + +class TestProfile(unittest.TestCase): + def setUp(self): + self.profile_dict = { + "condition": {"type": "walk"}, + "varBinds": [["IF-MIB", "ifInOctets", 1]], + } + + def test_init(self): + name = "test" + profile = Profile(name, self.profile_dict) + self.assertEqual(profile.name, name) + self.assertEqual(len(profile.varbinds), 1) + + def test_process(self): + profile_dict = { + "frequency": 30, + "condition": {"type": "walk"}, + "varBinds": [["IF-MIB", "ifOutOctets", 1]], + } + profile = Profile("test", profile_dict) + profile.process() + expected_get_varbinds = VarBindContainer() + expected_bulk_varbinds = VarBindContainer() + expected_bulk_varbinds.insert_varbind(Varbind(["SNMPv2-MIB"])) + expected_get_varbinds.insert_varbind(Varbind(["IF-MIB", "ifOutOctets", 1])) + self.assertEqual( + str(expected_bulk_varbinds.map), str(profile.varbinds_bulk.map) + ) + self.assertEqual(str(expected_get_varbinds.map), str(profile.varbinds_get.map)) + + def test_divide_on_bulk_and_get(self): + profile = Profile("test", self.profile_dict) + varbind = ["IF-MIB", "ifOutOctets", 1] + profile.varbinds = [varbind] + profile.divide_on_bulk_and_get() + expected_result = { + "IF-MIB::ifOutOctets.1": Varbind(["IF-MIB", "ifOutOctets", 1]) + } + self.assertEqual(str(expected_result), str(profile.varbinds_get.map)) + + def test_divide_on_bulk_and_get_many_elements(self): + profile = Profile("test", self.profile_dict) + varbinds = [ + ["IF-MIB", "ifOutOctets", 1], + ["IP-MIB"], + ["IP-MIB", "ipField"], + ["TCP-MIB", "tcpField", 0, 1], + ] + profile.varbinds = varbinds + profile.divide_on_bulk_and_get() + expected_result_get = { + "IF-MIB::ifOutOctets.1": Varbind(["IF-MIB", "ifOutOctets", 1]), + "TCP-MIB::tcpField.0.1": Varbind(["TCP-MIB", "tcpField", 0, 1]), + } + expected_result_bulk = {"IP-MIB": Varbind(["IP-MIB"])} + self.assertEqual(str(expected_result_get), str(profile.varbinds_get.map)) + self.assertEqual(str(expected_result_bulk), 
str(profile.varbinds_bulk.map)) + + def test_get_varbinds(self): + profile = Profile("test", self.profile_dict) + profile.varbinds_bulk = VarBindContainer() + profile.varbinds_bulk.insert_varbind(Varbind(["IF-MIB"])) + profile.varbinds_bulk.insert_varbind(Varbind(["IP-MIB", "ipField"])) + profile.varbinds_get = VarBindContainer() + profile.varbinds_get.insert_varbind(Varbind(["TCP-MIB", "field1", 1])) + profile.varbinds_get.insert_varbind(Varbind(["UDP-MIB", "fiel1d", 2])) + varbinds_bulk, varbinds_get = profile.get_varbinds() + self.assertEqual(varbinds_bulk, profile.varbinds_bulk) + self.assertEqual(varbinds_get, profile.varbinds_get) + + def test_get_mib_families(self): + profile = Profile("test", self.profile_dict) + profile.varbinds_bulk = VarBindContainer() + profile.varbinds_bulk.insert_varbind(Varbind(["IF-MIB"])) + profile.varbinds_bulk.insert_varbind(Varbind(["IP-MIB", "ipField"])) + profile.varbinds_get = VarBindContainer() + profile.varbinds_get.insert_varbind(Varbind(["TCP-MIB", "field1", 1])) + profile.varbinds_get.insert_varbind(Varbind(["UDP-MIB", "fiel1d", 2])) + mib_families = profile.get_mib_families() + self.assertEqual(mib_families, {"IF-MIB", "IP-MIB", "TCP-MIB", "UDP-MIB"}) diff --git a/test/splunk/test_prepare.py b/test/splunk/test_prepare.py index fa503dee7..281461009 100644 --- a/test/splunk/test_prepare.py +++ b/test/splunk/test_prepare.py @@ -82,9 +82,10 @@ def test_prepare_metrics(self, m_custom): "frequency": 15, "result": { "SOME_GROUP_KEY1": { + "indexes": [6], "metrics": { "metric_one": {"value": 23}, - "metric_two": {"value": 26, "index": "6"}, + "metric_two": {"value": 26}, }, "fields": { "field_one": {"value": "on"},