diff --git a/.fossa.yml b/.fossa.yml
new file mode 100644
index 000000000..d8ea192bc
--- /dev/null
+++ b/.fossa.yml
@@ -0,0 +1,5 @@
+version: 3
+server: https://app.fossa.com
+project:
+  id: "splunk-connect-for-snmp"
+  team: "TA-Automation"
\ No newline at end of file
diff --git a/.github/workflows/cd-docs-pdf.yaml b/.github/workflows/cd-docs-pdf.yaml
index 221e1b6ed..53a126d87 100644
--- a/.github/workflows/cd-docs-pdf.yaml
+++ b/.github/workflows/cd-docs-pdf.yaml
@@ -33,8 +33,8 @@ jobs:
         name: Produce PDF from docs
         run: |
           sudo apt-get install -y pandoc texlive librsvg2-bin texlive-latex-extra
-          pandoc -s --pdf-engine=pdflatex -o /tmp/SC4S_docs.pdf $(find . -type f -name "*.md") -H deeplists.tex
+          pandoc -s --pdf-engine=pdflatex -o /tmp/SC4SNMP_docs.pdf $(find . -type f -name "*.md") -H deeplists.tex
       - name: Release
         uses: softprops/action-gh-release@v1
         with:
-          files: /tmp/SC4S_docs.pdf
+          files: /tmp/SC4SNMP_docs.pdf
diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml
index 0b221356e..47075fb7e 100644
--- a/.github/workflows/ci-main.yaml
+++ b/.github/workflows/ci-main.yaml
@@ -28,9 +28,37 @@ on:
       - "main"
       - "develop"
       - "next"
+  workflow_call:
+    secrets:
+      FOSSA_API_KEY:
+        description: API token for FOSSA app
+        required: true
 jobs:
-  semgrep:
-    name: Scan
+  fossa-scan:
+    name: fossa
+    continue-on-error: true
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: run fossa analyze and create report
+        run: |
+          curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash
+          fossa analyze --debug
+          fossa report attribution --format text > /tmp/THIRDPARTY
+        env:
+          FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
+      - name: upload THIRDPARTY file
+        uses: actions/upload-artifact@v2
+        with:
+          name: THIRDPARTY
+          path: /tmp/THIRDPARTY
+      - name: run fossa test
+        run: |
+          fossa test --debug
+        env:
+          FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
+  semgrep-scan:
+    name: semgrep
     runs-on: ubuntu-latest
     if: (github.actor != 'dependabot[bot]')
     steps:
@@ -210,21 +238,4 @@ jobs:
           @google/semantic-release-replace-plugin
         env:
           GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }}
-  mike:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-      - uses: oleksiyrudenko/gha-git-credentials@v2-latest
-        with:
-          token: "${{ secrets.PAT_CLATOOL }}"
-      - name: Upload Docs
-        run: |
-          BRANCH=$(echo $GITHUB_REF | cut -d / -f 3)
-          echo $BRANCH
-          pip3 install poetry
-          poetry install
-          poetry run mike deploy -p $BRANCH
diff --git a/.github/workflows/mike.yaml b/.github/workflows/mike.yaml
new file mode 100644
index 000000000..bc756a981
--- /dev/null
+++ b/.github/workflows/mike.yaml
@@ -0,0 +1,44 @@
+# ########################################################################
+# Copyright 2021 Splunk Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ######################################################################## +name: mike +on: + push: + branches: + - "main" + - "develop" + - "next" + tags-ignore: + - "*beta*" + - "*next*" +jobs: + mike: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - uses: oleksiyrudenko/gha-git-credentials@v2-latest + with: + token: "${{ secrets.PAT_CLATOOL }}" + + - name: Upload Docs + run: | + BRANCH=$(echo $GITHUB_REF | cut -d / -f 3) + echo $BRANCH + pip3 install poetry + poetry install + poetry run mike deploy -p $BRANCH diff --git a/.github/workflows/offline-installation.yaml b/.github/workflows/offline-installation.yaml new file mode 100644 index 000000000..e6b3a0a01 --- /dev/null +++ b/.github/workflows/offline-installation.yaml @@ -0,0 +1,29 @@ +name: offline-installation +on: + push: + tags: + - "v*" + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v1 + + - name: Download images and upload them + run: | + ./${{ env.SCRIPT_NAME }} + VERSION=$(echo $GITHUB_REF | cut -d / -f 3) + gh release upload $VERSION /tmp/package/packages/* + env: + SCRIPT_NAME: create_packages.sh + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Create artifact + uses: actions/upload-artifact@v2 + with: + name: ${{ env.ARTIFACT_NAME }} + path: /tmp/package/packages + env: + ARTIFACT_NAME: offline-installation-packages diff --git a/.gitignore b/.gitignore index 7be662d1e..acb1d2a79 100644 --- a/.gitignore +++ b/.gitignore @@ -141,3 +141,15 @@ secrets integration_tests/scripts/set_env.sh + +# Edited by wzya +integration_tests/scripts/.terraform/providers/registry.terraform.io/hashicorp/aws/4.22.0/darwin_arm64/terraform-provider-aws_v4.22.0_x5 +integration_tests/scripts/splunk-connect-for-snmp.tgz +integration_tests/scripts/terraform.tfstate +integration_tests/scripts/terraform.tfstate.backup +integration_tests/scripts/.terraform.lock.hcl +integration_tests/scripts/inventory.yaml +integration_tests/scripts/main.tf +integration_tests/scripts/.terraform +.DS_Store +snmp-ssh-key.pem diff --git a/README.md b/README.md index d84aaf231..f00eb0175 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,5 @@ # splunk-connect-for-snmp Splunk Connect for SNMP Gets SNMP data in to Splunk Enterprise and Splunk Cloud Platform. -This project repo is primarily the documentation and publishing repo for the integrated set -of components. # Badges @@ -11,5 +9,7 @@ of components. For deployment and user documentation [see](https://splunk.github.io/splunk-connect-for-snmp/) -# +# Contact +Feel free to contact us via [#splunk-connect-for-snmp](https://splunk-usergroups.slack.com/archives/C01K4V86WV7) slack channel. 
+ # diff --git a/charts/splunk-connect-for-snmp/Chart.lock b/charts/splunk-connect-for-snmp/Chart.lock index a4682fc15..0bf526ea1 100644 --- a/charts/splunk-connect-for-snmp/Chart.lock +++ b/charts/splunk-connect-for-snmp/Chart.lock @@ -2,11 +2,11 @@ dependencies: - name: mongodb repository: https://charts.bitnami.com/bitnami version: 10.30.12 -- name: rabbitmq +- name: redis repository: https://charts.bitnami.com/bitnami - version: 8.24.13 + version: 16.8.10 - name: mibserver repository: https://pysnmp.github.io/mibs/charts/ - version: 1.14.0 -digest: sha256:545a9f4d0a644b6d8dd920bf64833959cd75dd956805de6c7a42d47484ee52de -generated: "2022-02-03T15:52:47.974625+01:00" + version: 1.14.2 +digest: sha256:882ca6e0cd371b94f1f1f06861f593b515b908e7504d40d187595ecdb863c4c6 +generated: "2022-06-15T16:29:53.985105+02:00" diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml index a94ceefa0..5af6725c7 100644 --- a/charts/splunk-connect-for-snmp/Chart.yaml +++ b/charts/splunk-connect-for-snmp/Chart.yaml @@ -25,8 +25,8 @@ dependencies: - name: mongodb version: ~10.30 repository: https://charts.bitnami.com/bitnami - - name: rabbitmq - version: ~8.24 + - name: redis + version: ~16.8 repository: https://charts.bitnami.com/bitnami - name: mibserver version: ~1.14.0 diff --git a/charts/splunk-connect-for-snmp/templates/NOTES.txt b/charts/splunk-connect-for-snmp/templates/NOTES.txt index eb2002c44..1c51b4d46 100644 --- a/charts/splunk-connect-for-snmp/templates/NOTES.txt +++ b/charts/splunk-connect-for-snmp/templates/NOTES.txt @@ -1,22 +1,9 @@ -1. Get the application URL by running these commands: -{{- if .Values.traps.ingress.enabled }} -{{- range $host := .Values.traps.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.traps.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.traps.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "splunk-connect-for-snmp.traps.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.traps.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "splunk-connect-for-snmp.traps.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "splunk-connect-for-snmp.traps.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.traps.service.port }} -{{- else if contains "ClusterIP" .Values.traps.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "splunk-connect-for-snmp.traps.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
-  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
-  echo "Visit http://127.0.0.1:8080 to use your application"
-  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
-{{- end }}
+Version 1.7 of SC4SNMP adds a new feature that enables horizontal autoscaling of worker pods. To use it, enable the microk8s metrics-server add-on:
+
+microk8s enable metrics-server
+
+You should also update the worker configuration in the values.yaml file according to the documentation:
+https://splunk.github.io/splunk-connect-for-snmp/main/configuration/worker-configuration
+
+A values.yaml template is available here:
+https://splunk.github.io/splunk-connect-for-snmp/main/gettingstarted/sc4snmp-installation
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/_helpers.tpl
index 7b64d0e11..894b748ac 100644
--- a/charts/splunk-connect-for-snmp/templates/_helpers.tpl
+++ b/charts/splunk-connect-for-snmp/templates/_helpers.tpl
@@ -7,10 +7,13 @@
 {{- end }}

 {{- define "splunk-connect-for-snmp.celery_url" -}}
-{{- printf "amqp://%s:%s@%s-rabbitmq:5672/" .Values.rabbitmq.auth.username .Values.rabbitmq.auth.password .Release.Name }}
-{{- end }}
+{{- printf "redis://%s-redis-headless:6379/0" .Release.Name }}
+{{- end }}
+{{- define "splunk-connect-for-snmp.redis_url" -}}
+{{- printf "redis://%s-redis-headless:6379/1" .Release.Name }}
+{{- end }}

 {{- define "splunk-connect-for-snmp.name" -}}
 {{- default (printf "%s" .Chart.Name ) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
\ No newline at end of file
+{{- end }}
diff --git a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml
index 4c62a7b3b..3c3d57fd6 100644
--- a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml
+++ b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml
@@ -29,6 +29,8 @@ spec:
           env:
             - name: CONFIG_PATH
               value: /app/config/config.yaml
+            - name: REDIS_URL
+              value: {{ include "splunk-connect-for-snmp.redis_url" . }}
             - name: INVENTORY_PATH
               value: /app/inventory/inventory.csv
             - name: CELERY_BROKER_URL
diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml
index 40af9c81e..de4ee136b 100644
--- a/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml
+++ b/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml
@@ -40,6 +40,8 @@ spec:
           env:
             - name: CONFIG_PATH
               value: /app/config/config.yaml
+            - name: REDIS_URL
+              value: {{ include "splunk-connect-for-snmp.redis_url" . }}
             - name: CELERY_BROKER_URL
               value: {{ include "splunk-connect-for-snmp.celery_url" . }}
             - name: MONGO_URI
diff --git a/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl
index 4bd053626..b83a4d0a3 100644
--- a/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl
+++ b/charts/splunk-connect-for-snmp/templates/traps/_helpers.tpl
@@ -2,7 +2,7 @@
 Expand the name of the chart.
*/}} {{- define "splunk-connect-for-snmp.traps.name" -}} -{{- default (printf "%s-%s" .Chart.Name "traps") .Values.traps.nameOverride | trunc 63 | trimSuffix "-" }} +{{- default (printf "%s-%s" .Chart.Name "trap") .Values.traps.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} {{/* @@ -14,7 +14,7 @@ If release name contains chart name it will be used as a full name. {{- if .Values.traps.fullnameOverride }} {{- .Values.traps.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} -{{- $name := default (printf "%s-%s" .Chart.Name "traps") .Values.traps.nameOverride }} +{{- $name := default (printf "%s-%s" .Chart.Name "trap") .Values.traps.nameOverride }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} diff --git a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml index 4f85411a1..7868fb5ea 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -55,7 +55,7 @@ spec: {{- if .Values.splunk.protocol }} - name: SPLUNK_HEC_SCHEME value: {{ .Values.splunk.protocol | default "https" | quote }} - {{- end}} + {{- end}} - name: SPLUNK_HEC_HOST value: {{ .Values.splunk.host | quote }} {{- if .Values.splunk.port }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl index 974af91c9..3a1a70184 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl @@ -45,11 +45,53 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} + {{- define "splunk-connect-for-snmp.worker.selectorLabels" -}} app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} +{{- define "splunk-connect-for-snmp.worker.poller.selectorLabels" -}} +app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }}-poller +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "splunk-connect-for-snmp.worker.sender.selectorLabels" -}} +app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }}-sender +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "splunk-connect-for-snmp.worker.trap.selectorLabels" -}} +app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }}-trap +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "splunk-connect-for-snmp.worker.trap.labels" -}} +helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . }} +{{ include "splunk-connect-for-snmp.worker.trap.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{- define "splunk-connect-for-snmp.worker.poller.labels" -}} +helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . }} +{{ include "splunk-connect-for-snmp.worker.poller.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{- define "splunk-connect-for-snmp.worker.sender.labels" -}} +helm.sh/chart: {{ include "splunk-connect-for-snmp.worker.chart" . 
}} +{{ include "splunk-connect-for-snmp.worker.sender.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} {{/* Create the name of the service account to use */}} @@ -60,3 +102,92 @@ Create the name of the service account to use {{- default "default" .Values.worker.serviceAccount.name }} {{- end }} {{- end }} + +{{- define "environmental-variables" -}} +- name: CONFIG_PATH + value: /app/config/config.yaml +- name: REDIS_URL + value: {{ include "splunk-connect-for-snmp.redis_url" . }} +- name: SC4SNMP_VERSION + value: {{ .Chart.Version | default "0.0.0" }} +- name: CELERY_BROKER_URL + value: {{ include "splunk-connect-for-snmp.celery_url" . }} +- name: MONGO_URI + value: {{ include "splunk-connect-for-snmp.mongo_uri" . }} +- name: WALK_RETRY_MAX_INTERVAL + value: {{ .Values.worker.walkRetryMaxInterval | default "600" | quote }} +{{- if .Values.worker.ignoreNotIncreasingOid }} +- name: IGNORE_NOT_INCREASING_OIDS + value: {{ join "," .Values.worker.ignoreNotIncreasingOid }} +{{- end}} +{{- if .Values.sim.enabled }} +- name: OTEL_METRICS_URL + value: "http://{{ .Release.Name }}-{{ include "splunk-connect-for-snmp.name" . }}-sim:8882" +{{- end}} +- name: LOG_LEVEL + value: {{ .Values.worker.logLevel | default "INFO" }} +- name: UDP_CONNECTION_TIMEOUT + value: {{ .Values.worker.udpConnectionTimeout | default "3" | quote }} +- name: PROFILES_RELOAD_DELAY + value: {{ .Values.worker.profilesReloadDelay | default "60" | quote }} +- name: MIB_SOURCES + value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/asn1/@mib@" +- name: MIB_INDEX + value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/index.csv" +- name: MIB_STANDARD + value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/standard.txt" +{{- if .Values.splunk.enabled }} +{{- if .Values.splunk.protocol }} +- name: SPLUNK_HEC_SCHEME + value: {{ .Values.splunk.protocol | default "https" | quote }} +{{- end}} +- name: SPLUNK_HEC_HOST + value: {{ .Values.splunk.host | quote }} +- name: IGNORE_EMPTY_VARBINDS + value: {{ .Values.worker.ignoreEmptyVarbinds | default "false" | quote }} +{{- if .Values.splunk.port }} +- name: SPLUNK_HEC_PORT + value: {{ .Values.splunk.port | default "" | quote }} +{{- end}} +{{- if .Values.splunk.path }} +- name: SPLUNK_HEC_PATH + value: {{ .Values.splunk.path | default "/services/collector" | quote }} +{{- end}} +- name: SPLUNK_HEC_INSECURESSL + value: {{ .Values.splunk.insecureSSL | default "false" | quote }} +- name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: {{ include "splunk-connect-for-snmp.name" . 
}}-splunk + key: hec_token +{{- if .Values.splunk.eventIndex }} +- name: SPLUNK_HEC_INDEX_EVENTS + value: {{ .Values.splunk.eventIndex | default "netops" }} +{{- end}} +{{- if .Values.splunk.metricsIndex }} +- name: SPLUNK_HEC_INDEX_METRICS + value: {{ .Values.splunk.metricsIndex | default "netmetrics" }} +{{- end}} +{{- end}} +{{- end }} + +{{- define "environmental-variables-poller" -}} +- name: WORKER_CONCURRENCY + value: {{ .Values.worker.poller.concurrency | default "2" | quote }} +- name: PREFETCH_COUNT + value: {{ .Values.worker.poller.prefetch | default "1" | quote }} +{{- end }} + +{{- define "environmental-variables-sender" -}} +- name: WORKER_CONCURRENCY + value: {{ .Values.worker.sender.concurrency | default "2" | quote }} +- name: PREFETCH_COUNT + value: {{ .Values.worker.sender.prefetch | default "1" | quote }} +{{- end }} + +{{- define "environmental-variables-trap" -}} +- name: WORKER_CONCURRENCY + value: {{ .Values.worker.trap.concurrency | default "2" | quote }} +- name: PREFETCH_COUNT + value: {{ .Values.worker.trap.prefetch | default "1" | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/deployment.yaml deleted file mode 100644 index d006e8c56..000000000 --- a/charts/splunk-connect-for-snmp/templates/worker/deployment.yaml +++ /dev/null @@ -1,204 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "splunk-connect-for-snmp.worker.fullname" . }} - labels: - {{- include "splunk-connect-for-snmp.worker.labels" . | nindent 4 }} -spec: - {{- if not .Values.worker.autoscaling.enabled }} - replicas: {{ .Values.worker.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "splunk-connect-for-snmp.worker.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.worker.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "splunk-connect-for-snmp.worker.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }}-worker - securityContext: - {{- toYaml .Values.worker.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: - [ - "celery", "worker", - ] - env: - - name: CONFIG_PATH - value: /app/config/config.yaml - - name: SC4SNMP_VERSION - value: {{ .Chart.Version | default "0.0.0" }} - - name: WORKER_CONCURRENCY - value: {{ .Values.worker.concurrency | default "2" | quote }} - - name: CELERY_BROKER_URL - value: {{ include "splunk-connect-for-snmp.celery_url" . }} - - name: MONGO_URI - value: {{ include "splunk-connect-for-snmp.mongo_uri" . }} - - name: CELERY_TASK_TIMEOUT - value: {{ .Values.worker.taskTimeout | default "2400" | quote }} - {{- if .Values.worker.ignoreNotIncreasingOid }} - - name: IGNORE_NOT_INCREASING_OIDS - value: {{ join "," .Values.worker.ignoreNotIncreasingOid }} - {{- end}} - {{- if .Values.sim.enabled }} - - name: OTEL_METRICS_URL - value: "http://{{ .Release.Name }}-{{ include "splunk-connect-for-snmp.name" . 
}}-sim:8882" - {{- end}} - - name: LOG_LEVEL - value: {{ .Values.worker.logLevel | default "INFO" }} - - name: UDP_CONNECTION_TIMEOUT - value: {{ .Values.worker.udpConnectionTimeout | default "3" | quote }} - - name: PROFILES_RELOAD_DELAY - value: {{ .Values.worker.profilesReloadDelay | default "300" | quote }} - - name: MIB_SOURCES - value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/asn1/@mib@" - - name: MIB_INDEX - value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/index.csv" - - name: MIB_STANDARD - value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/standard.txt" - {{- if .Values.splunk.enabled }} - {{- if .Values.splunk.protocol }} - - name: SPLUNK_HEC_SCHEME - value: {{ .Values.splunk.protocol | default "https" | quote }} - {{- end}} - - name: SPLUNK_HEC_HOST - value: {{ .Values.splunk.host | quote }} - - name: IGNORE_EMPTY_VARBINDS - value: {{ .Values.worker.ignoreEmptyVarbinds | default "false" | quote }} - {{- if .Values.splunk.port }} - - name: SPLUNK_HEC_PORT - value: {{ .Values.splunk.port | default "" | quote }} - {{- end}} - {{- if .Values.splunk.path }} - - name: SPLUNK_HEC_PATH - value: {{ .Values.splunk.path | default "/services/collector" | quote }} - {{- end}} - - name: SPLUNK_HEC_INSECURESSL - value: {{ .Values.splunk.insecureSSL | default "false" | quote }} - - name: SPLUNK_HEC_TOKEN - valueFrom: - secretKeyRef: - name: {{ include "splunk-connect-for-snmp.name" . }}-splunk - key: hec_token - {{- if .Values.splunk.eventIndex }} - - name: SPLUNK_HEC_INDEX_EVENTS - value: {{ .Values.splunk.eventIndex | default "netops" }} - {{- end}} - {{- if .Values.splunk.metricsIndex }} - - name: SPLUNK_HEC_INDEX_METRICS - value: {{ .Values.splunk.metricsIndex | default "netmetrics" }} - {{- end}} - {{- end}} - volumeMounts: - - name: config - mountPath: "/app/config" - readOnly: true - - name: pysnmp-cache-volume - mountPath: "/.pysnmp/" - readOnly: false - - name: tmp - mountPath: "/tmp/" - readOnly: false - {{- if (.Values.poller).usernameSecrets }} - - name: snmpv3-secrets - mountPath: "/app/secrets/snmpv3" - readOnly: true - {{- end }} - resources: - {{- toYaml .Values.worker.resources | nindent 12 }} - {{- with .Values.worker.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if eq .Values.worker.podAntiAffinity "hard" }} - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchLabels: - {{- include "splunk-connect-for-snmp.worker.selectorLabels" . | nindent 22 }} - {{- else if eq .Values.worker.podAntiAffinity "soft" }} - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - {{- include "splunk-connect-for-snmp.worker.selectorLabels" . | nindent 22 }} - {{- end }} - {{- with .Values.worker.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - # You set volumes at the Pod level, then mount them into containers inside that Pod - - name: config - configMap: - # Provide the name of the ConfigMap you want to mount. - name: {{ include "splunk-connect-for-snmp.name" . 
}}-config - # An array of keys from the ConfigMap to create as files - items: - - key: "config.yaml" - path: "config.yaml" - {{- if (.Values.poller).usernameSecrets }} - - name: snmpv3-secrets - projected: - sources: - {{- range .Values.poller.usernameSecrets }} - {{- $secret := (lookup "v1" "Secret" $.Release.Namespace . ) -}} - {{- if $secret }} - - secret: - name: {{ . }} - items: - {{ if $secret.data.userName -}} - - key: userName - path: {{ . }}/userName - {{- end }} - {{ if $secret.data.authKey -}} - - key: authKey - path: {{ . }}/authKey - {{- end }} - {{ if $secret.data.privKey -}} - - key: privKey - path: {{ . }}/privKey - {{- end }} - {{ if $secret.data.authProtocol -}} - - key: authProtocol - path: {{ . }}/authProtocol - {{- end }} - {{ if $secret.data.privProtocol -}} - - key: privProtocol - path: {{ . }}/privProtocol - {{- end }} - {{ if $secret.data.contextEngineId -}} - - key: contextEngineId - path: {{ . }}/contextEngineId - {{- end }} - {{ if $secret.data.contextName -}} - - key: contextName - path: {{ . }}/contextName - {{- end }} - {{- end }} - {{- end }} - {{- end }} - - name: pysnmp-cache-volume - emptyDir: {} - - name: tmp - emptyDir: {} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/hpa.yaml deleted file mode 100644 index a32769876..000000000 --- a/charts/splunk-connect-for-snmp/templates/worker/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.worker.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "splunk-connect-for-snmp.worker.fullname" . }} - labels: - {{- include "splunk-connect-for-snmp.worker.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "splunk-connect-for-snmp.worker.fullname" . }} - minReplicas: {{ .Values.worker.autoscaling.minReplicas }} - maxReplicas: {{ .Values.worker.autoscaling.maxReplicas }} - metrics: - {{- if .Values.worker.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.worker.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.worker.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.worker.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml new file mode 100644 index 000000000..2f9d73f77 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -0,0 +1,141 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-poller + labels: + {{- include "splunk-connect-for-snmp.worker.poller.labels" . | nindent 4 }} +spec: + {{- if not .Values.worker.poller.autoscaling.enabled }} + replicas: {{ .Values.worker.poller.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.poller.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.worker.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "splunk-connect-for-snmp.worker.poller.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-worker-poller + securityContext: + {{- toYaml .Values.worker.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + [ + "celery", "worker-poller", + ] + env: + {{- include "environmental-variables" . | nindent 12 }} + {{- include "environmental-variables-poller" . | nindent 12 }} + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + {{- if (.Values.poller).usernameSecrets }} + - name: snmpv3-secrets + mountPath: "/app/secrets/snmpv3" + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.worker.poller.resources | nindent 12 }} + {{- with .Values.worker.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if eq .Values.worker.podAntiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.poller.selectorLabels" . | nindent 22 }} + {{- else if eq .Values.worker.podAntiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.poller.selectorLabels" . | nindent 22 }} + {{- end }} + {{- with .Values.worker.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: {{ include "splunk-connect-for-snmp.name" . }}-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + {{- if (.Values.poller).usernameSecrets }} + - name: snmpv3-secrets + projected: + sources: + {{- range .Values.poller.usernameSecrets }} + {{- $secret := (lookup "v1" "Secret" $.Release.Namespace . ) -}} + {{- if $secret }} + - secret: + name: {{ . }} + items: + {{ if $secret.data.userName -}} + - key: userName + path: {{ . }}/userName + {{- end }} + {{ if $secret.data.authKey -}} + - key: authKey + path: {{ . }}/authKey + {{- end }} + {{ if $secret.data.privKey -}} + - key: privKey + path: {{ . }}/privKey + {{- end }} + {{ if $secret.data.authProtocol -}} + - key: authProtocol + path: {{ . }}/authProtocol + {{- end }} + {{ if $secret.data.privProtocol -}} + - key: privProtocol + path: {{ . }}/privProtocol + {{- end }} + {{ if $secret.data.contextEngineId -}} + - key: contextEngineId + path: {{ . }}/contextEngineId + {{- end }} + {{ if $secret.data.contextName -}} + - key: contextName + path: {{ . 
}}/contextName + {{- end }} + {{- end }} + {{- end }} + {{- end }} + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml new file mode 100644 index 000000000..0a130bf20 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/poller/hpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.worker.poller.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-poller + labels: + {{- include "splunk-connect-for-snmp.worker.poller.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-poller + minReplicas: {{ .Values.worker.poller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.poller.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.poller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.worker.poller.autoscaling.targetCPUUtilizationPercentage | default 80 }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml new file mode 100644 index 000000000..4cd579326 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -0,0 +1,142 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-sender + labels: + {{- include "splunk-connect-for-snmp.worker.sender.labels" . | nindent 4 }} +spec: + {{- if not .Values.worker.sender.autoscaling.enabled }} + replicas: {{ .Values.worker.sender.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.sender.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.worker.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "splunk-connect-for-snmp.worker.sender.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-worker-sender + securityContext: + {{- toYaml .Values.worker.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + [ + "celery", "worker-sender", + ] + env: + {{- include "environmental-variables" . | nindent 12 }} + {{- include "environmental-variables-sender" . | nindent 12 }} + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + {{- if (.Values.poller).usernameSecrets }} + - name: snmpv3-secrets + mountPath: "/app/secrets/snmpv3" + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.worker.sender.resources | nindent 12 }} + {{- with .Values.worker.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if eq .Values.worker.podAntiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.sender.selectorLabels" . | nindent 22 }} + {{- else if eq .Values.worker.podAntiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.sender.selectorLabels" . | nindent 22 }} + {{- end }} + {{- with .Values.worker.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: {{ include "splunk-connect-for-snmp.name" . }}-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + {{- if (.Values.poller).usernameSecrets }} + - name: snmpv3-secrets + projected: + sources: + {{- range .Values.poller.usernameSecrets }} + {{- $secret := (lookup "v1" "Secret" $.Release.Namespace . ) -}} + {{- if $secret }} + - secret: + name: {{ . }} + items: + {{ if $secret.data.userName -}} + - key: userName + path: {{ . }}/userName + {{- end }} + {{ if $secret.data.authKey -}} + - key: authKey + path: {{ . }}/authKey + {{- end }} + {{ if $secret.data.privKey -}} + - key: privKey + path: {{ . }}/privKey + {{- end }} + {{ if $secret.data.authProtocol -}} + - key: authProtocol + path: {{ . }}/authProtocol + {{- end }} + {{ if $secret.data.privProtocol -}} + - key: privProtocol + path: {{ . }}/privProtocol + {{- end }} + {{ if $secret.data.contextEngineId -}} + - key: contextEngineId + path: {{ . }}/contextEngineId + {{- end }} + {{ if $secret.data.contextName -}} + - key: contextName + path: {{ . }}/contextName + {{- end }} + {{- end }} + {{- end }} + {{- end }} + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + diff --git a/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml new file mode 100644 index 000000000..91230c417 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/sender/hpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.worker.sender.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-sender + labels: + {{- include "splunk-connect-for-snmp.worker.sender.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . 
}}-sender + minReplicas: {{ .Values.worker.sender.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.sender.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.sender.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.worker.sender.autoscaling.targetCPUUtilizationPercentage | default 80 }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml new file mode 100644 index 000000000..6d81974ba --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -0,0 +1,141 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-trap + labels: + {{- include "splunk-connect-for-snmp.worker.trap.labels" . | nindent 4 }} +spec: + {{- if not .Values.worker.trap.autoscaling.enabled }} + replicas: {{ .Values.worker.trap.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.trap.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.worker.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "splunk-connect-for-snmp.worker.trap.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "splunk-connect-for-snmp.worker.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }}-worker-trap + securityContext: + {{- toYaml .Values.worker.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + [ + "celery", "worker-trap", + ] + env: + {{- include "environmental-variables" . | nindent 12 }} + {{- include "environmental-variables-trap" . | nindent 12 }} + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + {{- if (.Values.poller).usernameSecrets }} + - name: snmpv3-secrets + mountPath: "/app/secrets/snmpv3" + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.worker.trap.resources | nindent 12 }} + {{- with .Values.worker.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if eq .Values.worker.podAntiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.trap.selectorLabels" . | nindent 22 }} + {{- else if eq .Values.worker.podAntiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.trap.selectorLabels" . | nindent 22 }} + {{- end }} + {{- with .Values.worker.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: {{ include "splunk-connect-for-snmp.name" . }}-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + {{- if (.Values.poller).usernameSecrets }} + - name: snmpv3-secrets + projected: + sources: + {{- range .Values.poller.usernameSecrets }} + {{- $secret := (lookup "v1" "Secret" $.Release.Namespace . ) -}} + {{- if $secret }} + - secret: + name: {{ . }} + items: + {{ if $secret.data.userName -}} + - key: userName + path: {{ . }}/userName + {{- end }} + {{ if $secret.data.authKey -}} + - key: authKey + path: {{ . }}/authKey + {{- end }} + {{ if $secret.data.privKey -}} + - key: privKey + path: {{ . }}/privKey + {{- end }} + {{ if $secret.data.authProtocol -}} + - key: authProtocol + path: {{ . }}/authProtocol + {{- end }} + {{ if $secret.data.privProtocol -}} + - key: privProtocol + path: {{ . }}/privProtocol + {{- end }} + {{ if $secret.data.contextEngineId -}} + - key: contextEngineId + path: {{ . }}/contextEngineId + {{- end }} + {{ if $secret.data.contextName -}} + - key: contextName + path: {{ . }}/contextName + {{- end }} + {{- end }} + {{- end }} + {{- end }} + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml new file mode 100644 index 000000000..ae225915a --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/trap/hpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.worker.trap.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-trap + labels: + {{- include "splunk-connect-for-snmp.worker.trap.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . 
}}-trap + minReplicas: {{ .Values.worker.trap.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.trap.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.trap.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.worker.trap.autoscaling.targetCPUUtilizationPercentage | default 80 }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml index a2b1e4461..34aa3f3b7 100644 --- a/charts/splunk-connect-for-snmp/values.yaml +++ b/charts/splunk-connect-for-snmp/values.yaml @@ -73,12 +73,12 @@ scheduler: runAsGroup: 10001 resources: {} - # limits: - # cpu: 1 - # memory: 1Gi - # requests: - # cpu: 100m - # memory: 256Mi +# limits: +# cpu: 800m +# memory: 512Mi +# requests: +# cpu: 500m +# memory: 256Mi autoscaling: enabled: false @@ -94,10 +94,51 @@ scheduler: podAntiAffinity: soft worker: - ignoreNotIncreasingOid: [] - replicaCount: 2 - concurrency: 4 taskTimeout: 2400 + walkRetryMaxInterval: 600 + ignoreNotIncreasingOid: [] + poller: + replicaCount: 2 + concurrency: 4 + prefetch: 1 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + resources: + limits: + cpu: 500m + requests: + cpu: 250m + trap: + replicaCount: 2 + concurrency: 4 + prefetch: 30 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + resources: + limits: + cpu: 500m + requests: + cpu: 250m + sender: + replicaCount: 1 + concurrency: 4 + prefetch: 30 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + resources: + limits: + cpu: 500m + requests: + cpu: 250m nameOverride: "" fullnameOverride: "" @@ -124,13 +165,13 @@ worker: runAsUser: 10001 runAsGroup: 10001 - resources: {} - # limits: - # cpu: 2 - # memory: 512Mi - # requests: - # cpu: 200m - # memory: 256Mi + resources: + limits: + cpu: 500m + memory: 5128Mi + requests: + cpu: 300m + memory: 256Mi autoscaling: enabled: false @@ -330,78 +371,7 @@ mongodb: annotations: prometheus.io/scrape: "true" prometheus.io/port: "9216" -rabbitmq: - extraConfiguration: | - consumer_timeout = 7200000 - replicaCount: 1 - pdb: - create: true - - podManagementPolicy: Parallel - ## @section Common parameters - - rbac: - create: true +redis: + architecture: standalone auth: - username: sc4snmp - password: password - erlangCookie: cookie - - # ## RabbitMQ containers' resource requests and limits - # ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - # ## We usually recommend not to specify default resources and to leave this as a conscious - # ## choice for the user. This also increases chances charts run on environments with little - # ## resources, such as Minikube. If you do want to specify resources, uncomment the following - # ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # ## @param resources.limits The resources limits for RabbitMQ containers - # ## @param resources.requests The requested resources for RabbitMQ containers - # ## - resources: {} - # limits: - # cpu: 1000m - # memory: 2Gi - # requests: - # cpu: 250m - # memory: 1Gi - - ## @section Persistence parameters - - persistence: - ## @param persistence.enabled Enable RabbitMQ data persistence using PVC - ## - enabled: true - - # ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume - # ## If defined, storageClassName: - # ## If set to "-", storageClassName: "", which disables dynamic provisioning - # ## If undefined (the default) or set to null, no storageClassName spec is - # ## set, choosing the default provisioner. (gp2 on AWS, standard on - # ## GKE, AWS & OpenStack) - # ## - # storageClass: "" - # ## @param persistence.selector Selector to match an existing Persistent Volume - # ## selector: - # ## matchLabels: - # ## app: my-app - # ## - # selector: {} - # ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume - # ## - # accessMode: ReadWriteOnce - - # ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims - # ## The value is evaluated as a template - # ## So, for example, the name can depend on .Release or .Chart - # ## - # existingClaim: "" - - ## @param persistence.size PVC Storage Request for RabbitMQ data volume - ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well - ## - size: 4Gi - - # ## @param persistence.volumes Additional volumes without creating PVC - # ## - name: volume_name - # ## emptyDir: {} - # ## - # volumes: [] + enabled: false \ No newline at end of file diff --git a/create_packages.sh b/create_packages.sh new file mode 100755 index 000000000..149236c90 --- /dev/null +++ b/create_packages.sh @@ -0,0 +1,235 @@ +#!/bin/bash + +export DOCKER_DEFAULT_PLATFORM=linux/amd64 +combine_image_name(){ + #Function to combine registry, repository and tag + # into one image, so that it can be pulled by docker + + image_registry=$1 + image_repository=$2 + image_tag=$3 + app_version=$4 + + if [ -n "$image_registry" ]; + then + result="$result""$image_registry/" + fi + + if [ -n "$image_repository" ]; + then + result="$result""$image_repository" + if [ -n "$image_tag" ]; + then + result="$result":"$image_tag" + elif [ -n "$app_version" ]; + then + result="$result":"$app_version" + fi + echo "$result" + else + echo "" + fi +} + +images_to_pack="" +pull_image(){ + #Function to pull image required for specified chart + + chart_dir="$1" + if [ -d "$chart_dir" ] && { [ -a "$chart_dir/Chart.yaml" ] || [ -a "$chart_dir/Chart.yml" ]; } && { [ -a "$chart_dir/values.yaml" ] || [ -a "$chart_dir/values.yml" ]; } + then + if [ -a "$chart_dir/Chart.yaml" ] + then + chart_file="$chart_dir/Chart.yaml" + else + chart_file="$chart_dir/Chart.yml" + fi + + if [ -a "$chart_dir/values.yaml" ] + then + values_file="$chart_dir/values.yaml" + else + values_file="$chart_dir/values.yml" + fi + + #Get all the lines with information about docker image from values.yaml + docker_info=$(sed -nE '/^image:/,/^[a-zA-Z#]/p' "$values_file") + + #Get appVersion from Chart.yaml in case there is no tag specified in values.yaml + app_version=$(grep -Eo 'appVersion:\s\S+$' "$chart_file" | cut -d : -f2 | xargs) + + image_registry=$(grep -Eo 'registry:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + image_repository=$(grep -Eo 'repository:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + image_tag=$(grep -Eo 'tag:\s\S+$' <<< 
"$docker_info" | cut -d : -f2 | xargs) + + docker_pull_image="" + docker_pull_image=$(combine_image_name "$image_registry" "$image_repository" "$image_tag" "$app_version") + + if [ -z "$docker_pull_image" ] + then + echo "No image to pull" + exit 0 + fi + + echo "Pulling: $docker_pull_image" + docker pull "$docker_pull_image" + images_to_pack="$images_to_pack""$docker_pull_image " + + #For mongodb we need one more image from values.yaml + if [[ "$chart_dir" == *"mongodb"* ]] + then + docker_pull_image="" + metrics_info=$(sed -nE '/^metrics:/,/^[a-zA-Z#]/p' "$values_file") + docker_info=$(sed -nE '/image:/,/[#]/p' <<< "$metrics_info") + + image_registry=$(grep -Eo 'registry:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + image_repository=$(grep -Eo 'repository:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + image_tag=$(grep -Eo 'tag:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + + docker_pull_image=$(combine_image_name "$image_registry" "$image_repository" "$image_tag" "$app_version") + + printf "\n" + if [ -z "$docker_pull_image" ] + then + echo "No image to pull" + exit 0 + fi + echo "Pulling: ""$docker_pull_image" + docker pull "$docker_pull_image" + images_to_pack="$images_to_pack""$docker_pull_image " + + docker_pull_image="" + volumePermissions_info=$(sed -nE '/^volumePermissions:/,/^[a-zA-Z#]/p' "$values_file") + docker_info=$(sed -nE '/image:/,/[#]/p' <<< "$volumePermissions_info") + + image_registry=$(grep -Eo 'registry:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + image_repository=$(grep -Eo 'repository:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + image_tag=$(grep -Eo 'tag:\s\S+$' <<< "$docker_info" | cut -d : -f2 | xargs) + + docker_pull_image=$(combine_image_name "$image_registry" "$image_repository" "$image_tag" "$app_version") + + printf "\n" + if [ -z "$docker_pull_image" ] + then + echo "No image to pull" + exit 0 + fi + echo "Pulling: ""$docker_pull_image" + docker pull "$docker_pull_image" + images_to_pack="$images_to_pack""$docker_pull_image " + fi + printf "\n\n" + else + echo "Invalid directory" + exit 0 + fi +} + + +helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo add pysnmp-mibs https://pysnmp.github.io/mibs/charts +helm dependency build charts/splunk-connect-for-snmp +helm package charts/splunk-connect-for-snmp -d /tmp/package +cd /tmp/package || exit +SPLUNK_FILE=$(ls) +tar -xvf "$SPLUNK_FILE" + +DIRS=$(ls) +SPLUNK_DIR="" +for d in $DIRS +do + #Find a directory name to the unpacked splunk chart + if [[ "$d" =~ splunk.* ]] && [[ ! "$d" =~ .+\.tgz$ ]] && [[ ! "$d" =~ .+\.tar$ ]] + then + SPLUNK_DIR="$d" + fi +done +if [ -z "$SPLUNK_DIR" ] +then + exit +fi +cd "$SPLUNK_DIR" || exit + +rm -rf charts +mkdir charts +helm dep update +cd charts || exit + +#Unpack charts and delete .tgz files +FILES=$(ls) +for f in $FILES +do + tar -xvf "$f" + rm "$f" +done + +#Pull images from charts +DIRS=$(ls) +for d in $DIRS +do + pull_image "$d" +done + +mkdir /tmp/package/packages +docker save $images_to_pack > /tmp/package/packages/dependencies-images.tar +cd ../.. 
+tar -czvf packages/splunk-connect-for-snmp-chart.tar splunk-connect-for-snmp
+
+# Download and pack image for sim
+cd "$SPLUNK_DIR" || exit
+
+# Check if there is a value for sim docker image in values.yaml
+# If not, get default image from templates/sim/deployment.yaml
+
+if [ -f "values.yaml" ]
+then
+    values_file="values.yaml"
+else
+    values_file="values.yml"
+fi
+
+sim_info=$(sed -nE '/^sim:/,/^[a-zA-Z#]/p' "$values_file")
+docker_link=$(grep -oE 'image:.+' <<< "$sim_info" | cut -d : -f2 | xargs)
+docker_tag=$(grep -oE 'tag:.+' <<< "$sim_info" | cut -d : -f2 | xargs)
+
+if [ -z "$docker_link" ]
+then
+    cd templates/sim || exit
+    if [ -f "deployment.yaml" ]
+    then
+        depl_file="deployment.yaml"
+    else
+        depl_file="deployment.yml"
+    fi
+    docker_info=$(grep image: "$depl_file")
+
+    docker_link=$(cut -d : -f2 <<< "$docker_info" | grep -oE '".+"')
+    docker_link="${docker_link#?}"
+    docker_link="${docker_link%?}"
+
+    docker_tag=$(cut -d : -f3 <<< "$docker_info" | grep -oE '".+"')
+    docker_tag="${docker_tag#?}"
+    docker_tag="${docker_tag%?}"
+fi
+
+if [ -z "$docker_tag" ]
+then
+    docker_image_pull="$docker_link"
+else
+    docker_image_pull="$docker_link:$docker_tag"
+fi
+
+docker pull "$docker_image_pull"
+docker save "$docker_image_pull" > /tmp/package/packages/sim_image.tar
+
+# Download and package otel charts
+cd /tmp/package/packages/ || exit
+LOCATION=$(curl -s https://api.github.com/repos/signalfx/splunk-otel-collector-chart/releases/latest | grep "zipball_url" | awk '{ print $2 }' | sed 's/,$//' | sed 's/"//g' )
+curl -L -o otel-repo.zip "$LOCATION"
+unzip otel-repo.zip
+rm otel-repo.zip
+OTEL_DIR=$(ls | grep -E "signalfx-splunk.+")
+CHART_DIR="$OTEL_DIR/helm-charts/splunk-otel-collector"
+helm package "$CHART_DIR" -d /tmp/package/packages/
+rm -rf "$OTEL_DIR"
diff --git a/docs/bestpractices.md b/docs/bestpractices.md
index b4a72a91d..a8b78278b 100644
--- a/docs/bestpractices.md
+++ b/docs/bestpractices.md
@@ -56,27 +56,7 @@ worker:

 If you put only IP address (ex. `127.0.0.1`), then errors will be ignored for all of its devices (like `127.0.0.1:161`, `127.0.0.1:163`...). If you put IP address and host structured as `{host}:{port}` that means the error will be ignored only for this device.

-### Walking a device takes much time
-If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of type `walk`
-```yaml
-scheduler:
-  profiles: |
-    small_walk:
-      condition:
-        type: "walk"
-      varBinds:
-        - ['UDP-MIB']
-```
-Such profile should be placed in the profiles section of inventory definition. It will be executed with the frequency defined in walk_interval.
-In case of multiple profiles of type `walk` will be placed in profiles, the last one will be used.
+### Walking a device takes too much time

-```yaml
-poller:
-  inventory: |
-    address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete
-    10.202.4.202,,2c,public,,,2000,small_walk,,
-```
+To limit the scope of the walk, enable the small walk functionality as described in [Configure small walk profile](../configuration/configuring-profiles/#walk-profile).

-NOTE: When small walk is configured, you can set up polling only of OIDs belonging to walk profile varBinds.
-Additionally, there are two MIB families that are enabled by default (we need them to create state of the device in the database and poll base profiles): `IF-MIB` and `SNMPv2-MIB`.
-For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, `IF-MIB` and `SNMPv2-MIB` OIDs.
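The bestpractices hunk above keeps the error-ignoring guidance in its context lines but no longer shows a concrete configuration. A minimal `values.yaml` sketch of what those lines describe, using the chart's `worker.ignoreNotIncreasingOid` setting from this same changeset (the addresses are placeholders):

```yaml
worker:
  # "OID not increasing" errors are ignored for the devices listed here
  ignoreNotIncreasingOid:
    - "127.0.0.1"      # bare IP: ignore the error for every device at this address
    - "127.0.0.2:163"  # host:port: ignore the error only for this exact device
```
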
-For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, `IF-MIB` and `SNMPv2-MIB` OIDs.
diff --git a/docs/configuration/configuring-profiles.md b/docs/configuration/configuring-profiles.md
new file mode 100644
index 000000000..695d99df4
--- /dev/null
+++ b/docs/configuration/configuring-profiles.md
@@ -0,0 +1,230 @@
+# Configuring profiles
+
+Profiles are the units where you configure what you want to poll and then assign it to devices. Profiles are defined in the `values.yaml` file
+under the `scheduler` section.
+
+For instructions on how to apply profiles, see [Update Inventory and Profile](../deployment-configuration/#update-inventory-and-profile).
+
+There are two types of profiles in general:
+
+1. Static profile - polling starts when the profile is added to the `profiles` field in the `inventory` of the device
+2. Smart profile - polling starts when the configured conditions are fulfilled and the device to poll from has `smart_profiles` enabled in the inventory.
+Smart profiles are useful when you have many devices of a certain kind and don't want to configure each of them one by one with static profiles.
+
+    To configure a smart profile, do the following:
+
+    1. Choose one of the fields polled from the device; most commonly `sysDescr` is used
+    2. Set the filter to match all the devices of this kind
+    3. Enable smart profiles for the devices against which this profile should be evaluated
+
+A profile template looks as follows:
+
+```yaml
+scheduler:
+  profiles: |
+    #Name of profile
+    basev1:
+      # Define frequency for profile
+      frequency: 10
+      #Define condition
+      condition:
+        # Define type of condition. Allowed values: field, base and walk
+        type: field
+        field: "SNMPv2-MIB.sysDescr"
+        # Define patterns
+        patterns:
+          - '.*STRING_TO_BE_MATCHED.*'
+      #Define varbinds to query
+      varBinds:
+        # Syntax: [ "MIB-Component", "MIB object name"[Optional], "MIB index number"[Optional]]
+        - ['SNMPv2-MIB']
+        - ['SNMPv2-MIB', 'sysName']
+        - ['SNMPv2-MIB', 'sysUpTime',0]
+```
+
+For example, here are two configured profiles. One is smart, the other is static:
+
+```yaml
+scheduler:
+  profiles: |
+    smart_profile:
+      frequency: 10
+      condition:
+        type: field
+        field: "SNMPv2-MIB.sysDescr"
+        patterns:
+          - '.*linux.*'
+      varBinds:
+        - ['SNMPv2-MIB']
+        - ['SNMPv2-MIB', 'sysName']
+        - ['SNMPv2-MIB', 'sysUpTime',0]
+    static_profile:
+      frequency: 300
+      varBinds:
+        - ['IP-MIB']
+```
+
+To enable only `static_profile` polling for the host `10.202.4.202`, configure the inventory as follows:
+
+```yaml
+poller:
+  inventory: |
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
+    10.202.4.202,,2c,public,,,2000,static_profile,f,
+```
+
+To enable checking the `10.202.4.202` device against smart profiles, set `smart_profiles` to `t`:
+
+```yaml
+poller:
+  inventory: |
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
+    10.202.4.202,,2c,public,,,2000,,t,
+```
+
+Then, if the device's `sysDescr` matches the `'.*linux.*'` pattern, the `smart_profile` profile will be polled.
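+
+Both mechanisms can also be combined for a single host. A minimal sketch of such an inventory entry (reusing the `static_profile` name from the example above; adjust it to your own profile definitions):
+
+```yaml
+poller:
+  inventory: |
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
+    10.202.4.202,,2c,public,,,2000,static_profile,t,
+```
+
+With such an entry, `static_profile` is polled unconditionally, and smart profiles are additionally evaluated against the device.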
+
+## varBinds configuration
+`varBinds`, short for "variable bindings" in SNMP, are combinations of an Object Identifier (OID) and a value.
+`varBinds` are used for defining what OIDs should be requested from SNMP agents. `varBinds` is a required
+subsection of each profile. The `varBinds` syntax looks as follows:
+
+    [ "MIB-Component", "MIB object"[Optional], "MIB index number"[Optional]]
+
+  - `MIB-Component` - The SNMP MIB itself consists of distinct component MIBs, each of which refers to a specific
+    defined collection of management information that is part of the overall SNMP MIB, e.g. `SNMPv2-MIB`.
+    If only `MIB-Component` is set, SC4SNMP will query the whole subtree.
+  - `MIB object` - The SNMP MIB stores only simple data types: scalars and two-dimensional arrays of scalars,
+    called tables. Keywords SYNTAX, ACCESS, and DESCRIPTION, as well as other keywords such as STATUS and
+    INDEX, are used to define the SNMP MIB managed objects.
+  - `MIB index number` - Defines the index number for a given MIB object, e.g. `0`.
+
+Example:
+```yaml
+  varBinds:
+    # Syntax: [ "MIB-Component", "MIB object name"[Optional], "MIB index number"[Optional]]
+    - ['SNMPv2-MIB']
+    - ['SNMPv2-MIB', 'sysName']
+    - ['SNMPv2-MIB', 'sysUpTime',0]
+```
+
+## Static Profile configuration
+Static profiles are used when they are listed in the `profiles` field of the inventory configuration in the `poller`
+service, see [Inventory configuration](../poller-configuration/#configure-inventory). Static profiles are executed
+even if the `smart_profiles` flag in the inventory is set to false.
+To configure a static profile, set the following values in the `profiles` section:
+
+  - `ProfileName` - defined as a subsection key in `profiles`.
+  - `frequency` - the interval between SNMP GET executions, in seconds.
+  - `varBinds` - the varBinds to query.
+
+Example:
+```yaml
+scheduler:
+  profiles: |
+    static_profile_example:
+      frequency: 20
+      varBinds:
+        - ['SNMPv2-MIB']
+        - ['SNMPv2-MIB', 'sysName']
+        - ['SNMPv2-MIB', 'sysUpTime',0]
+```
+
+### Particular kinds of static profiles
+
+Some static profiles have additional functionality for special scenarios.
+
+#### WALK profile
+
+If you would like to limit the scope of the walk, point one of the profiles in the inventory to a profile definition of type `walk`:
+```yaml
+scheduler:
+  profiles: |
+    small_walk:
+      condition:
+        type: "walk"
+      varBinds:
+        - ['UDP-MIB']
+```
+Such a profile should be placed in the `profiles` section of the inventory definition. It is executed with the frequency defined in `walk_interval`.
+If multiple profiles of type `walk` are placed in `profiles`, only the last one is used.
+
+This is how to use `walk` profiles:
+
+```yaml
+poller:
+  inventory: |
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
+    10.202.4.202,,2c,public,,,2000,small_walk,,
+```
+
+NOTE: When a small walk is configured, you can only set up polling of OIDs belonging to the walk profile's varBinds.
+Additionally, two MIB families are enabled by default (they are needed to create the state of the device in the database and to poll base profiles): `IF-MIB` and `SNMPv2-MIB`.
+For example, if you decide to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, `IF-MIB` and `SNMPv2-MIB` OIDs.
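+
+As an illustration, a walk profile can be paired with a polling profile that stays within the walked MIB families plus the two defaults (a sketch; `udp_poll` is a hypothetical profile name):
+
+```yaml
+scheduler:
+  profiles: |
+    small_walk:
+      condition:
+        type: "walk"
+      varBinds:
+        - ['UDP-MIB']
+    udp_poll:
+      frequency: 60
+      varBinds:
+        - ['UDP-MIB']
+        - ['IF-MIB']
+```
+
+Both profiles would then be listed for the device in the inventory, e.g. `small_walk;udp_poll`.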
+
+
+## SmartProfile configuration
+A smart profile is executed when the `smart_profiles` flag in the inventory is set to true and the condition defined in the profile matches.
+More information about configuring the inventory can be found in [Inventory configuration](../poller-configuration/#configure-inventory).
+
+To configure a smart profile, set the following values in the `profiles` section:
+
+  - `ProfileName` - defined as a subsection key in `profiles`.
+  - `frequency` - the interval between SNMP GET executions, in seconds.
+  - `condition` - a section defining the conditions required to match the profile:
+      - `type` - key of the `condition` section which defines the type of condition. Allowed values: `base` and `field` (the `walk` type is also allowed here, but it is not part of smart profiles)
+          - a condition of type `base` is executed when `smart_profiles` in the inventory is set to true.
+          - a condition of type `field` is executed if the `pattern` matches the defined `field`. Supported fields:
+              - "SNMPv2-MIB.sysDescr"
+              - "SNMPv2-MIB.sysObjectID"
+      - `field` - defines the field name for a condition of type `field`.
+      - `pattern` - defines a list of regular expression patterns for the MIB object defined in `field`, for example:
+          - ".*linux.*"
+  - `varBinds` - the varBinds to query.
+
+Example of a `base` type profile:
+```yaml
+scheduler:
+  profiles: |
+    SmartProfile_base_example:
+      frequency: 10
+      condition:
+        type: "base"
+      varBinds:
+        - ['SNMPv2-MIB']
+        - ['SNMPv2-MIB', 'sysName']
+```
+
+Example of a `field` type profile, also called an automatic profile:
+```yaml
+scheduler:
+  profiles: |
+    SmartProfile_field_example:
+      frequency: 10
+      condition:
+        type: "field"
+        field: "SNMPv2-MIB.sysDescr"
+        patterns:
+          - '.*STRING_TO_BE_MATCHED.*'
+      varBinds:
+        - ['SNMPv2-MIB']
+        - ['SNMPv2-MIB', 'sysName']
+```
+
+NOTE: Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. If you change the frequency or a profile type, the change is reflected only after the next walk.
+There is also a 5 minute TTL for the inventory pod: SC4SNMP allows one inventory upgrade and then blocks further updates for the next 5 minutes.
+
+## Custom translations
+If you want to use custom names/translations of MIB names, configure them under the `customTranslations` section of the scheduler config.
+Translations are grouped by MIB family. In the example below, `IF-MIB.ifInDiscards` will be translated to `IF-MIB.myCustomName1`:
+```yaml
+scheduler:
+  customTranslations:
+    IF-MIB:
+      ifInDiscards: myCustomName1
+      ifOutErrors: myCustomName2
+    SNMPv2-MIB:
+      sysDescr: myCustomName3
+```
+
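+A profile that polls a translated object still refers to the original MIB object name in its `varBinds`; only the names appearing in the resulting data are expected to change. A sketch of that expectation (`discards_profile` is a hypothetical name):
+
+```yaml
+scheduler:
+  customTranslations:
+    IF-MIB:
+      ifInDiscards: myCustomName1
+  profiles: |
+    discards_profile:
+      frequency: 60
+      varBinds:
+        - ['IF-MIB', 'ifInDiscards']
+```
+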
diff --git a/docs/configuration/deployment-configuration.md b/docs/configuration/deployment-configuration.md
index 068757bf4..f73fa1594 100644
--- a/docs/configuration/deployment-configuration.md
+++ b/docs/configuration/deployment-configuration.md
@@ -14,7 +14,7 @@ The whole file is divided into the following components:
3. sim - more detail [sim configuration](sim-configuration.md)
4. traps - more detail [trap configuration](trap-configuration.md)
5. mongodb - more detail [mongo configuration](mongo-configuration.md)
-6. rabbitmq - more detail [rabbitmq configuration](rabbitmq-configuration.md)
+6. redis - more detail [redis configuration](redis-configuration.md)

### Shared values
All of the components have the `resources` field for adjusting memory resources:
diff --git a/docs/configuration/poller-configuration.md b/docs/configuration/poller-configuration.md
index cfc24dad0..d72350ac0 100644
--- a/docs/configuration/poller-configuration.md
+++ b/docs/configuration/poller-configuration.md
@@ -16,7 +16,7 @@ Poller example configuration:
poller:
  logLevel: "WARN"
  inventory: |
-    address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
    10.202.4.202,,2c,public,,,2000,,,
```
@@ -37,11 +37,11 @@ To update inventory follow instruction: [Update Inventory and Profile](../deploy)
 - `secret` [OPTIONAL] - usernameSecrets define which secrets in "Secret" objects in k8s should be use, as a value it need to put name of "Secret" objects. Field is required when `version` is `3`. More information how to define "Secrets" object for SNMPv3 can be found in [SNMPv3 Configuration](snmpv3-configuration.md)
- - `securityEngine` [OPTIONAL] - Security engine required by SNMPv3. Field is required when `version` is `3`.
- - `walk_interval` [OPTIONAL] - Define interval in second for SNMP walk, default value `42000`
+ - `security_engine` [OPTIONAL] - Security engine required by SNMPv3. Field is required when `version` is `3`.
+ - `walk_interval` [OPTIONAL] - Defines the interval in seconds for the SNMP walk, default value `42000`. This value needs to be between `1800` and `42000`
 - `profiles` [OPTIONAL] - list of SNMP profiles which need to be used for device. More than one profile can be added by semicolon
-separation eg. `profiale1;profile2`. More about profile in [Profile Configuration](../scheduler-configuration/#configure-profile)
- - `SmartProfiles` [OPTIONAL] - enabled SmartProfile, default value true. Allowed value: `true`, `false`. Default value is `true`
+separation eg. `profile1;profile2`. More about profiles in [Profile Configuration](../scheduler-configuration/#configure-profile)
+ - `smart_profiles` [OPTIONAL] - enables smart profiles. Allowed values: `true`, `false`. Default value is `true`
 - `delete` [OPTIONAL] - flags which define if inventory should be deleted from scheduled tasks for walk and gets. Allowed value: `true`, `false`. Default value is `false`.
@@ -49,7 +49,7 @@
Example:
```yaml
poller:
  inventory: |
-    address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
    10.202.4.202,,2c,public,,,2000,,,
```
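+
+For instance, to assign more than one profile to a device using the semicolon separation described above (a sketch; `profile1` and `profile2` are hypothetical profile names):
+
+```yaml
+poller:
+  inventory: |
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
+    10.202.4.202,,2c,public,,,2000,profile1;profile2,,
+```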
diff --git a/docs/configuration/rabbitmq-configuration.md b/docs/configuration/rabbitmq-configuration.md
deleted file mode 100644
index d8da863e4..000000000
--- a/docs/configuration/rabbitmq-configuration.md
+++ /dev/null
@@ -1,32 +0,0 @@
-#RabbitMQ configuration
-RabbitMQ is a service with is used as a queue service for SC4SNMP. It is queuing tasks like SNMP Walk and GETs.
-
-### RabbitMQ configuration file
-
-RabbitMQ configuration is kept in `values.yaml` file in section `rabbitmq`.
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
-
-Example:
-```yaml
-rabbitmq:
-  pdb:
-    create: true
-  #For HA configuration at least three replicas should be used
-  replicaCount: 1
-  persistence:
-    enabled: true
-    storageClass: "microk8s-hostpath"
-  volumePermissions:
-    enabled: true
-  #The following requests and limits are appropriate starting points
-  #For productions deployments
-  resources:
-    limits:
-      cpu: 2
-      memory: 2Gi
-    requests:
-      cpu: 750m
-      memory: 512Mi
-```
-
-The recommendation is to do not to change this setting. In case of need to change it please follow documentation: [RabbitMQ on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq/)
diff --git a/docs/configuration/redis-configuration.md b/docs/configuration/redis-configuration.md
new file mode 100644
index 000000000..e05570d1d
--- /dev/null
+++ b/docs/configuration/redis-configuration.md
@@ -0,0 +1,12 @@
+# Redis configuration
+
+Recently, RabbitMQ was replaced with Redis as the queue service and periodic task database. The reason for this change was to increase SC4SNMP performance and protect against bottlenecks.
+
+Redis is a service which is used both for managing periodic tasks and as a queue service for SC4SNMP. It queues tasks like SNMP walk and poll.
+
+### Redis configuration file
+
+Redis configuration is kept in the `redis` section of the `values.yaml` file.
+`values.yaml` is used during the installation process to configure Kubernetes values.
+
+If you need to change the configuration, follow the documentation: [Redis on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/redis)
diff --git a/docs/configuration/scheduler-configuration.md b/docs/configuration/scheduler-configuration.md
index 8fea4dd6f..acde92c5e 100644
--- a/docs/configuration/scheduler-configuration.md
+++ b/docs/configuration/scheduler-configuration.md
@@ -25,7 +25,7 @@ scheduler:
```
### Define log level
-Log level for trap can be set by changing the value for key `logLevel`. Allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
+Log level for scheduler can be set by changing the value for key `logLevel`. Allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
The default value is `WARNING`

### Define resource requests and limits
@@ -41,140 +41,3 @@ scheduler:
      cpu: 200m
      memory: 128Mi
```
-### Configure profile
-To update profile follow instruction: [Update Inventory and Profile](../deployment-configuration/#update-inventory-and-profile).
-Profiles used in inventory can be created in `values.yaml`, which can be modified in scheduler config in
-`values.yaml`, example:
-```yaml
-scheduler:
-  profiles: |
-    #Name of profile
-    basev1:
-      # Define frequency for profile
-      frequency: 10
-      #Define condition
-      condition:
-        # Define type of condition. Allowed value field and base
-        type: field
-        field: "SNMPv2-MIB.sysDescr"
-        # Define paterns
-        patterns:
-          - '.*STRING_TO_BE_MATCHED.*'
-      #Define varbinds to query
-      varBinds:
-        # Syntax: [ "MIB-Component", "MIB object name"[Optional], "MIB index number"[Optional]]
-        - ['SNMPv2-MIB']
-        - ['SNMPv2-MIB', 'sysName']
-        - ['SNMPv2-MIB', 'sysUpTime',0]
-```
-
-#### varBinds configuration
-`varBinds` short for "variable binding" in SNMP. The combination of an Object Identifier (OID) and a value.
-`varBinds` are used for defining in profiles what OIDs should be getting from SNMP Agents. `varBinds` is a required
-subsection of each profile.
Syntax configuration of `varBinds` looks following: - - [ "MIB-Component", "MIB object"[Optional], "MIB index number"[Optional]] - - - `MIB-Component` - The SNMP MIB, itself, consists of distinct component MIBs, each of which refers to a specific - defined collection of management information that is part of the overall SNMP MIB eg. `SNMPv2-MIB`. - If only `MIB-Component` is set then all whole subtree is getting. - - `MIB object` - The SNMP MIB stores only simple data types: scalars and two-dimensional arrays of scalars, - called tables. Keywords SYNTAX, ACCESS, and DESCRIPTION as well as other keywords such as STATUS and - INDEX is used to define the SNMP MIB managed objects. - - `MIB index number` - Define index number for given MIB Object eg. `0`. - -Example: -```yaml - varBinds: - # Syntax: [ "MIB-Component", "MIB object name"[Optional], "MIB index number"[Optional]] - - ['SNMPv2-MIB'] - - ['SNMPv2-MIB', 'sysName'] - - ['SNMPv2-MIB', 'sysUpTime',0] -``` - -#### Static Profile configuration -Static Profile are used when they are defined on a list of profiles in inventory configuration in `poller` -service [Inventory configuration](../poller-configuration/#configure-inventory). Static Profiles are executed -even if the SmartProfile flag in inventory is set to false. -To configure Static Profile following value needs to be set in `profiles` section: - - - `ProfileName` - define as subsection key in `profiles`. - - `frequency` - define interval between executing SNMP gets in second. - - `varBinds` - define var binds to query. - -Example: -```yaml -scheduler: - profiles: | - static_profile_example: - frequency: 20 - varBinds: - - ['SNMPv2-MIB'] - - ['SNMPv2-MIB', 'sysName'] - - ['SNMPv2-MIB', 'sysUpTime',0] -``` - -#### SmartProfile configuration -SmartProfile is executed when the SmartProfile flag in inventory is set to true and the condition defined in profile matching. -More information about configuring inventory can be found in [Inventory configuration](../poller-configuration/#configure-inventory) - -To configure Static Profile following value needs to be set in `profiles` section: - - - `ProfileName` - define as subsection key in `profiles`. - - `frequency` - define interval between executing SNMP gets in second. - - `condition` - section define conditions to much profile - - `type` - key of `condition` section which defines type of condition. Allowed value `base` and `field`. - - `base` type of condition will be executed when `SmartProfile` in inventory is set to true. - - `walk` such profile will be executed instead of full walk - - `field` type of condition will be executed if match `pattern` for defined `field`. Supported fields: - - "SNMPv2-MIB.sysDescr" - - "SNMPv2-MIB.sysObjectID" - - `field` Define field name for condition type field. - - `pattern` Define list of regular expression pattern for MIB object field defined in `field` section. - - `varBinds` - define var binds to query. - -Example of `base` type of condition -```yaml -scheduler: - profiles: | - SmartProfile_base_example: - frequency: 10 - condition: - type: "base" - varBinds: - - ['SNMPv2-MIB'] - - ['SNMPv2-MIB', 'sysName'] -``` - -Example of `field` type of condition -```yaml -scheduler: - profiles: | - SmartProfile_field_example: - frequency: 10 - condition: - type: "field" - field: "SNMPv2-MIB.sysDescr" - patterns: - - '.*STRING_TO_BE_MATCHED.*' - varBinds: - - ['SNMPv2-MIB'] - - ['SNMPv2-MIB', 'sysName'] -``` - -NOTE: Be aware that profile changes may not be reflected immediately. 
It can take up to 5 minutes for changes to propagate.
-There is also 5 minute TTL for an inventory pod. Basically, SC4SNMP allows one inventory upgrade and then block updates for the next 5 minutes
-
-#### Custom translations
-If the user wants to use custom names/translations of MIB names, it can be configured under customTranslations section under scheduler config.
-Translations are grouped by MIB family. In the example below IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1
-```yaml
-scheduler:
-  customTranslations:
-    IF-MIB:
-      ifInDiscards: myCustomName1
-      ifOutErrors: myCustomName2
-    SNMPv2-MIB:
-      sysDescr: myCustomName3
-```
-
diff --git a/docs/configuration/sim-configuration.md b/docs/configuration/sim-configuration.md
index 3ecb8cfb0..2d29cca92 100644
--- a/docs/configuration/sim-configuration.md
+++ b/docs/configuration/sim-configuration.md
@@ -24,14 +24,13 @@ After executing `microk8s helm3 upgrade --install snmp -f values.yaml splunk-con
```yaml
splunker@ip-10-202-13-233:~$ microk8s kubectl get pods -n sc4snmp
NAME                                                          READY   STATUS    RESTARTS   AGE
-snmp-splunk-connect-for-snmp-worker-7496b66947-6hjhl          1/1     Running   0          32s
-snmp-splunk-connect-for-snmp-worker-7496b66947-flcg7          1/1     Running   0          32s
-snmp-splunk-connect-for-snmp-scheduler-846f9b4f69-4rxd8       1/1     Running   0          32s
-snmp-mibserver-cdfccf586-cwz7h                                1/1     Running   0          32s
-snmp-splunk-connect-for-snmp-inventory--1-dxz5d               1/1     Running   0          32s
-snmp-splunk-connect-for-snmp-traps-6bbf57497b-v8d7l           1/1     Running   0          32s
-snmp-splunk-connect-for-snmp-traps-6bbf57497b-nvxrz           1/1     Running   0          31s
-snmp-splunk-connect-for-snmp-sim-59b89747f-kn6tf              1/1     Running   0          32s
-snmp-rabbitmq-0                                               0/1     Running   0          31s
-snmp-mongodb-9957b9f4d-f94hv                                  2/2     Running   0          32s
+snmp-splunk-connect-for-snmp-scheduler-7ddbc8d75-bljsj        1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-worker-poller-57cd8f4665-9z9vx   1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-worker-sender-5c44cbb9c5-ppmb5   1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-worker-trap-549766d4-28qzh       1/1     Running   0          133m
+snmp-mibserver-7f879c5b7c-hz9tz                               1/1     Running   0          133m
+snmp-mongodb-869cc8586f-vvr9f                                 2/2     Running   0          133m
+snmp-redis-master-0                                           1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-trap-78759bfc8b-79m6d            1/1     Running   0          99m
+snmp-splunk-connect-for-snmp-sim-59b89747f-kn6tf              1/1     Running   0          32s
```
diff --git a/docs/configuration/snmpv3-configuration.md b/docs/configuration/snmpv3-configuration.md
index 361822c2a..d31d2faf6 100644
--- a/docs/configuration/snmpv3-configuration.md
+++ b/docs/configuration/snmpv3-configuration.md
@@ -21,5 +21,5 @@ microk8s kubectl create -n secret generic \
--from-literal=privProtocol=
```
-Configured credential can be use in [poller](poller-configuration.md) and [trap](trap-configuration.md) services.
-In services configuration, `secretname` need to be provided.
+Configured credentials can be used in the [poller](poller-configuration.md) and [trap](trap-configuration.md) services.
+In the services configuration, the `secretname` needs to be provided.
diff --git a/docs/configuration/trap-configuration.md b/docs/configuration/trap-configuration.md
index 0ab798929..9a088e5cb 100644
--- a/docs/configuration/trap-configuration.md
+++ b/docs/configuration/trap-configuration.md
@@ -95,7 +95,7 @@ traps:
```
### Define number of traps server replica
-`replicas` Defines the number of replicas for trap container should be 2x number of nodes. The default value is `2`.
+`replicaCount` defines the number of replicas for the trap container; it should be 2x the number of nodes. The default value is `2`.
Example:
```yaml
traps:
diff --git a/docs/configuration/worker-configuration.md b/docs/configuration/worker-configuration.md
index 27a806610..d090c1503 100644
--- a/docs/configuration/worker-configuration.md
+++ b/docs/configuration/worker-configuration.md
@@ -1,24 +1,188 @@
# Worker Configuration
-The worker is a service with is responsible for tasks execution like SNMP Walk, GET, or processing trap messages.
+The worker is a service which is responsible for executing tasks like the SNMP walk, GET, or processing trap messages.

### Worker configuration file

-Worker configuration is kept in `values.yaml` file in section `worker`.
+Worker configuration is kept in the `worker` section of the `values.yaml` file. `worker` is divided into 3 types of workers: `poller`, `sender` and `trap`.
`values.yaml` is being used during the installation process for configuring Kubernetes values.

+### Worker types
+
+SC4SNMP has two base functionalities: monitoring traps and polling. There are 3 types of workers, and each type has
+its own responsibility.
+
+Trap workers consume all the trap-related tasks produced by the trap pod.
+
+Poller workers consume all the tasks related to polling.
+
+Sender workers handle sending data to Splunk. You always need at least one sender pod running.
+
+### Worker parameters
+
+| variable | description | default |
+| --- | --- | --- |
+| worker.taskTimeout | task timeout in seconds (usually necessary when the walk process takes a long time) | 2400 |
+| worker.poller.replicaCount | number of poller worker replicas | 2 |
+| worker.poller.autoscaling.enabled | enabling autoscaling for poller worker pods | false |
+| worker.poller.autoscaling.minReplicas | minimum number of running poller worker pods when autoscaling is enabled | 2 |
+| worker.poller.autoscaling.maxReplicas | maximum number of running poller worker pods when autoscaling is enabled | 40 |
+| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 |
+| worker.poller.resources.limits | the resource limits for the poller worker container | {} |
+| worker.poller.resources.requests | the requested resources for the poller worker container | {} |
+| worker.trap.replicaCount | number of trap worker replicas | 2 |
+| worker.trap.autoscaling.enabled | enabling autoscaling for trap worker pods | false |
+| worker.trap.autoscaling.minReplicas | minimum number of running trap worker pods when autoscaling is enabled | 2 |
+| worker.trap.autoscaling.maxReplicas | maximum number of running trap worker pods when autoscaling is enabled | 40 |
+| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 |
+| worker.trap.resources.limits | the resource limits for the trap worker container | {} |
+| worker.trap.resources.requests | the requested resources for the trap worker container | {} |
+| worker.sender.replicaCount | number of sender worker replicas | 2 |
+| worker.sender.autoscaling.enabled | enabling autoscaling for sender worker pods | false |
+| worker.sender.autoscaling.minReplicas | minimum number of running sender worker pods when autoscaling is enabled | 2 |
+| worker.sender.autoscaling.maxReplicas | maximum number of running sender worker pods when autoscaling is enabled | 40 |
+| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 |
+| worker.sender.resources.limits | the resource limits for the sender worker container | {} |
+| worker.sender.resources.requests | the requested resources for the sender worker container | {} |
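+
+For example, if long walks keep hitting the task timeout, the global `taskTimeout` from the table above can be raised in `values.yaml` (a sketch; `3600` is an arbitrary illustrative value):
+
+```yaml
+worker:
+  taskTimeout: 3600
+```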
+
+### Worker scaling
+
+You can adjust the number of worker pods to your needs in two ways: by setting a fixed value in `replicaCount`,
+or by enabling `autoscaling`, which scales pods automatically.
+
+#### Real-life scenario: I use SC4SNMP only for trap monitoring, and I want to use my resources effectively
+
+If you don't use polling at all, it is best to set `worker.poller.replicaCount` to `0`.
+Remember that if you want to use polling in the future, you need to increase `replicaCount`,
+otherwise polling won't work. To monitor traps, adjust `worker.trap.replicaCount` depending on your needs,
+and `worker.sender.replicaCount` to send traps to Splunk. Usually you need far fewer sender pods than trap ones.
+
+This is an example of `values.yaml` without autoscaling:
+
+```yaml
+worker:
+  trap:
+    replicaCount: 4
+  sender:
+    replicaCount: 1
+  poller:
+    replicaCount: 0
+  logLevel: "WARNING"
+```
+
+This is an example of `values.yaml` with autoscaling:
+
+```yaml
+worker:
+  trap:
+    autoscaling:
+      enabled: true
+      minReplicas: 4
+      maxReplicas: 10
+      targetCPUUtilizationPercentage: 80
+  sender:
+    autoscaling:
+      enabled: true
+      minReplicas: 2
+      maxReplicas: 5
+      targetCPUUtilizationPercentage: 80
+  poller:
+    replicaCount: 0
+  logLevel: "WARNING"
+```
+
+In the example above, both trap and sender pods are autoscaled. During an upgrade process,
+`minReplicas` pods are created, and new ones are created only if CPU utilization
+exceeds `targetCPUUtilizationPercentage`, which by default is 80%. This solution helps you keep
+resource usage adjusted to what you actually need.
+
+After the helm upgrade process, you will see `horizontalpodautoscaler` entries in `microk8s kubectl get all -n sc4snmp`:
+
+```
+NAME                                                                             REFERENCE                                               TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
+horizontalpodautoscaler.autoscaling/snmp-mibserver                               Deployment/snmp-mibserver                               1%/80%    1         3         1          97m
+horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-sender   Deployment/snmp-splunk-connect-for-snmp-worker-sender   1%/80%    2         5         2          28m
+horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-trap     Deployment/snmp-splunk-connect-for-snmp-worker-trap     1%/80%    4         10        4          28m
+```
+
+If you see `<unknown>/80%` in the `TARGETS` column instead of a CPU percentage, you probably don't have the `metrics-server` addon enabled.
+Enable it using: `microk8s enable metrics-server`.
+
+
+#### Real-life scenario: I have a significant delay in polling
+
+Sometimes, when polling is configured to run frequently and on many devices, workers get overloaded
+and there is a delay in delivering data to Splunk. To avoid such situations, scale the poller and sender pods.
+Because of the walk cycles (the walk is a costly operation run once in a while), poller workers require more resources
+for a short time. For this reason, enabling autoscaling is recommended.
+
+This is an example of `values.yaml` with autoscaling:
+
+```yaml
+worker:
+  trap:
+    autoscaling:
+      enabled: true
+      minReplicas: 4
+      maxReplicas: 10
+      targetCPUUtilizationPercentage: 80
+  sender:
+    autoscaling:
+      enabled: true
+      minReplicas: 2
+      maxReplicas: 5
+      targetCPUUtilizationPercentage: 80
+  poller:
+    autoscaling:
+      enabled: true
+      minReplicas: 2
+      maxReplicas: 20
+      targetCPUUtilizationPercentage: 80
+  logLevel: "WARNING"
+```
+
+Remember that the system won't scale itself infinitely; there is a finite amount of resources that you can allocate.
+By default, every worker has the following resources configured:
+
+```yaml
+  resources:
+    limits:
+      cpu: 500m
+    requests:
+      cpu: 250m
+```
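+
+If poller workers turn out to be the bottleneck, these defaults can be raised per worker type, following the same `resources` schema (a sketch with illustrative values):
+
+```yaml
+worker:
+  poller:
+    resources:
+      limits:
+        cpu: 1
+      requests:
+        cpu: 500m
+```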
+
+#### I have autoscaling enabled and experience problems with the Mongo and Redis pods
+
+If the MongoDB and Redis pods are crashing, and some of the pods are stuck in an infinite `Pending` state, it means
+you're out of resources and SC4SNMP cannot scale any further. You should decrease the `maxReplicas` numbers for the
+workers, so that they don't go beyond the available CPU.
+
+#### I don't know how to set autoscaling parameters and how many replicas I need
+
+The best way to see whether pods are overloaded is to run the command:
+
+```
+microk8s kubectl top pods -n sc4snmp
+```
+
+```
+NAME                                                          CPU(cores)   MEMORY(bytes)
+snmp-mibserver-7f879c5b7c-nnlfj                               1m           3Mi
+snmp-mongodb-869cc8586f-q8lkm                                 18m          225Mi
+snmp-redis-master-0                                           10m          2Mi
+snmp-splunk-connect-for-snmp-scheduler-558dccfb54-nb97j       2m           136Mi
+snmp-splunk-connect-for-snmp-trap-5878f89bbf-24wrz            2m           129Mi
+snmp-splunk-connect-for-snmp-trap-5878f89bbf-z9gd5            2m           129Mi
+snmp-splunk-connect-for-snmp-worker-poller-599c7fdbfb-cfqjm   260m         354Mi
+snmp-splunk-connect-for-snmp-worker-poller-599c7fdbfb-ztf7l   312m         553Mi
+snmp-splunk-connect-for-snmp-worker-sender-579f796bbd-vmw88   14m          257Mi
+snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-46zhf     3m           259Mi
+snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-mjtpv     4m           259Mi
+```
+
+Here you can see how much CPU and memory is being used by each pod. If the CPU is close to 500m (which is the default limit for one pod),
+you should enable autoscaling/increase `maxReplicas`, or increase `replicaCount` with autoscaling off.
+
+
+Here you can read about Horizontal Pod Autoscaling and how to adjust the maximum replica value to the resources you have: [Horizontal Autoscaling.](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
\ No newline at end of file
diff --git a/docs/gettingstarted/mk8s/k8s-microk8s.md b/docs/gettingstarted/mk8s/k8s-microk8s.md
index 445363559..f287c1f1e 100644
--- a/docs/gettingstarted/mk8s/k8s-microk8s.md
+++ b/docs/gettingstarted/mk8s/k8s-microk8s.md
@@ -55,8 +55,7 @@ sudo systemctl enable iscsid
microk8s enable helm3
microk8s enable storage
microk8s enable rbac
-microk8s enable community
-microk8s enable openebs
+microk8s enable metrics-server
microk8s status --wait-ready
```
diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/gettingstarted/sc4snmp-installation.md
index 2a29336e7..888d72a81 100644
--- a/docs/gettingstarted/sc4snmp-installation.md
+++ b/docs/gettingstarted/sc4snmp-installation.md
@@ -2,9 +2,13 @@
The basic installation process and configuration used in this section are typical
for single node non HA deployments and do not have resource requests and limits.
-See the configuration sections for mongo, Rabbitmq, scheduler, worker, and traps for guidance
+See the configuration sections for mongo, redis, scheduler, worker, and traps for guidance
on production configuration.
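+For example, a production-oriented `values.yaml` can pin resources per component via the shared `resources` field (a sketch with illustrative values taken from the configuration sections; see those sections for recommended numbers):
+
+```yaml
+worker:
+  trap:
+    resources:
+      limits:
+        cpu: 500m
+      requests:
+        cpu: 250m
+scheduler:
+  resources:
+    limits:
+      cpu: 200m
+      memory: 128Mi
+```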
+### Offline installation + +For offline installation instructions see [this page](../offlineinstallation/offline-sc4snmp.md). + ### Add SC4SNMP repository ``` microk8s helm3 repo add splunk-connect-for-snmp https://splunk.github.io/splunk-connect-for-snmp @@ -43,8 +47,34 @@ traps: #loadBalancerIP: The IP address in the metallb pool loadBalancerIP: ###X.X.X.X### worker: - # replicas: Number of replicas for worker container should two or more - #replicaCount: 2 + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 2 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 2 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 # udpConnectionTimeout: timeout in seconds for SNMP operations #udpConnectionTimeout: 5 logLevel: "INFO" @@ -64,10 +94,10 @@ poller: # - sc4snmp-hlab-sha-aes # - sc4snmp-hlab-sha-des # inventory: | - # address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete - # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,600,,, - # 10.0.0.199,,2c,public,,,600,,,True - # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,600,,, + # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,,, + # 10.0.0.199,,2c,public,,,3000,,,True + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, sim: # sim must be enabled if you want to use signalFx enabled: false @@ -80,15 +110,6 @@ mongodb: storageClass: "microk8s-hostpath" volumePermissions: enabled: true -rabbitmq: - pdb: - create: true - replicaCount: 1 - persistence: - enabled: true - storageClass: "microk8s-hostpath" - volumePermissions: - enabled: true ``` `values.yaml` is being used during the installation process for configuring Kubernetes values. 
@@ -147,14 +168,14 @@ microk8s kubectl get pods -n sc4snmp
Example output:
```
NAME                                                          READY   STATUS    RESTARTS   AGE
-snmp-splunk-connect-for-snmp-worker-66685fcb6d-f6rxb          1/1     Running   0          6m4s
-snmp-splunk-connect-for-snmp-scheduler-6586488d85-t6j5d       1/1     Running   0          6m4s
-snmp-mongodb-arbiter-0                                        1/1     Running   0          6m4s
-snmp-mibserver-6f575ddb7d-mmkmn                               1/1     Running   0          6m4s
-snmp-mongodb-0                                                2/2     Running   0          6m4s
-snmp-mongodb-1                                                2/2     Running   0          4m58s
-snmp-rabbitmq-0                                               1/1     Running   0          6m4s
-snmp-splunk-connect-for-snmp-traps-54f79b945d-bmbg7           1/1     Running   0          6m4s
+snmp-splunk-connect-for-snmp-scheduler-7ddbc8d75-bljsj        1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-worker-poller-57cd8f4665-9z9vx   1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-worker-sender-5c44cbb9c5-ppmb5   1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-worker-trap-549766d4-28qzh       1/1     Running   0          133m
+snmp-mibserver-7f879c5b7c-hz9tz                               1/1     Running   0          133m
+snmp-mongodb-869cc8586f-vvr9f                                 2/2     Running   0          133m
+snmp-redis-master-0                                           1/1     Running   0          133m
+snmp-splunk-connect-for-snmp-trap-78759bfc8b-79m6d            1/1     Running   0          99m
```

### Test SNMP Traps
@@ -196,7 +217,7 @@ service snmpd stop
service snmpd start
```

-- Configure SC4SNMP Poller to test add IP address which need to be poll. Add configuration entry in `value.yaml` file by
+- Configure the SC4SNMP poller for a test: add the IP address which needs to be polled. Add a configuration entry in the `values.yaml` file and
replace the IP address `10.0.101.22` with the server IP address where snmpd were configured.
``` bash
poller:
  usernameSecrets:
    - sc4snmp-homesecure-sha-aes
    - sc4snmp-homesecure-sha-des
  inventory: |
-      address,version,community,walk_interval,profiles,SmartProfiles,delete
-      10.0.101.22,public,60,,,
+      address,version,community,walk_interval,profiles,smart_profiles,delete
+      10.0.101.22,public,42000,,,
```

-- Load `value.yaml` file in SC4SNMP
+- Load the `values.yaml` file into SC4SNMP

``` bash
microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
```

- Check-in Splunk

-Up to 1 min events appear in Splunk:
+Before polling starts, SC4SNMP must perform the walk process on the device. It runs after a new device is configured, and then every `walk_interval` seconds.
+Its purpose is to gather all the data and provide meaningful context for the polling records. It may be that your device is so big that the walk takes too long and the scope of walking must be limited.
+In such cases, enable the small walk using the instruction: [walk takes too much time](/bestpractices/#walking-a-device-takes-too-much-time).
+When the walk finishes, events appear in Splunk. Check it with these queries:
``` bash
index="netops" sourcetype="sc4snmp:event"
```

-Up to 1 min events appear in Splunk:
+
``` bash
| mpreview index="netmetrics" | search sourcetype="sc4snmp:metric"
```
diff --git a/docs/gettingstarted/sck-installation.md b/docs/gettingstarted/sck-installation.md
index 1036697f9..0c704c53d 100644
--- a/docs/gettingstarted/sck-installation.md
+++ b/docs/gettingstarted/sck-installation.md
@@ -1,8 +1,17 @@
# Splunk OpenTelemetry Collector for Kubernetes installation

+Splunk OpenTelemetry Collector for Kubernetes is not required for the SC4SNMP installation. It is a tool that sends logs
+and metrics from a k8s cluster to a Splunk instance, which makes SC4SNMP easier to debug.
+You can achieve the same with the `microk8s kubectl logs` command on the instances you're interested in, but if you're not proficient in Kubernetes,
+using the Splunk OpenTelemetry Collector for Kubernetes is strongly advised.
+
The below steps are sufficient for a Splunk OpenTelemetry Collector installation for the SC4SNMP project with Splunk Enterprise/Enterprise Cloud.
In order to learn more about Splunk OpenTelemetry Collector visit [Splunk OpenTelemetry Collector](https://github.com/signalfx/splunk-otel-collector-chart).

+### Offline installation
+
+For offline installation instructions see [this page](../offlineinstallation/offline-sck.md).
+
### Add Splunk OpenTelemetry Collector repository to HELM
```bash
diff --git a/docs/images/interface_analytics.png b/docs/images/interface_analytics.png
new file mode 100644
index 000000000..1b19fdc25
Binary files /dev/null and b/docs/images/interface_analytics.png differ
diff --git a/docs/images/interface_metrics.png b/docs/images/interface_metrics.png
new file mode 100644
index 000000000..e0afb364f
Binary files /dev/null and b/docs/images/interface_metrics.png differ
diff --git a/docs/images/trap.png b/docs/images/trap.png
new file mode 100644
index 000000000..f693120b5
Binary files /dev/null and b/docs/images/trap.png differ
diff --git a/docs/index.md b/docs/index.md
index e20c3261e..4b2f14ebe 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -7,3 +7,20 @@
Splunk Connect for SNMP is an edge-deployed, containerized, and highly
available solution for collecting SNMP data for Splunk Enterprise,
Splunk Enterprise Cloud and Splunk Infrastructure Monitoring.
+SC4SNMP provides contextual information: it not only forwards SNMP data to Splunk, but also combines
+the pieces into meaningful objects. For example, you don't need to write queries to gather information about
+the interfaces of a device, because SC4SNMP does that automatically:
+
+[![Interface metrics](images/interface_metrics.png)](images/interface_metrics.png)
+
+This makes it easy to visualize the data in Splunk Analytics:
+
+[![Interface analytics](images/interface_analytics.png)](images/interface_analytics.png)
+
+Here is a short presentation of how to browse SNMP data in Splunk:
+
+![type:video](videos/setting_analytics.mov)
+
+SC4SNMP can also easily monitor trap events sent by different SNMP devices. Trap events are JSON formatted and are stored in the `netops` index.
+
+[![Trap example](images/trap.png)](images/trap.png)
\ No newline at end of file
diff --git a/docs/offlineinstallation/offline-sc4snmp.md b/docs/offlineinstallation/offline-sc4snmp.md
new file mode 100644
index 000000000..012cdf5da
--- /dev/null
+++ b/docs/offlineinstallation/offline-sc4snmp.md
@@ -0,0 +1,158 @@
+# Offline SC4SNMP installation
+
+## Local machine with internet access
+To install SC4SNMP offline, first download the following packages from the GitHub release and move them
+to the SC4SNMP installation server:
+
+- `dependencies-images.tar`
+- `splunk-connect-for-snmp-chart.tar`
+
+Moreover, the SC4SNMP Docker image must be pulled, saved as a `.tar` package, and then moved to the server as well.
+This process requires Docker to be installed locally.
+
+The image can be pulled from the following repository: `ghcr.io/splunk/splunk-connect-for-snmp/container:<tag>`.
+The latest tag can be found [here](https://github.com/splunk/splunk-connect-for-snmp) under the Releases section, with the label `latest`.
+ + +Example of docker pull command: + +```bash +docker pull ghcr.io/splunk/splunk-connect-for-snmp/container: +``` + +Then save the image. Directory where this image will be saved can be specified after `>` sign: + +```bash +docker save ghcr.io/splunk/splunk-connect-for-snmp/container: > snmp_image.tar +``` +All three packages `snmp_image.tar`, `dependencies-images.tar` and `splunk-connect-for-snmp-chart.tar` must be moved to the sc4snmp installation server. + +## Installation on the server + +On the server all the images must be imported to the microk8s cluster. This can be done with the following command: + +```bash +microk8s ctr image import +``` + +In case of this installation the following commands must be run: + +```bash +microk8s ctr image import dependencies-images.tar +microk8s ctr image import snmp_image.tar +``` + +Then create `values.yaml`. It's a little different from `values.yaml` used in an online installation. +The difference are following lines added to prevent automatic image pulling: + +```yaml +image: + pullPolicy: "Never" +``` + +Example `values.yaml` file: +```yaml +splunk: + enabled: true + protocol: https + host: ###SPLUNK_HOST### + token: ###SPLUNK_TOKEN### + insecureSSL: "false" + port: "###SPLUNK_PORT###" +image: + pullPolicy: "Never" +traps: + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: ###X.X.X.X### +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 2 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 2 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: "INFO" +scheduler: + logLevel: "INFO" +# profiles: | +# generic_switch: +# frequency: 60 +# varBinds: +# - ['SNMPv2-MIB', 'sysDescr'] +# - ['SNMPv2-MIB', 'sysName', 0] +# - ['IF-MIB'] +# - ['TCP-MIB'] +# - ['UDP-MIB'] +poller: + # usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + # inventory: | + # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,,, + # 10.0.0.199,,2c,public,,,3000,,,True + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + # sim must be enabled if you want to use signalFx + enabled: false + image: + pullPolicy: "Never" +# signalfxToken: BCwaJ_Ands4Xh7Nrg +# signalfxRealm: us0 +mongodb: + image: + pullPolicy: "Never" + pdb: + create: true + persistence: + storageClass: "microk8s-hostpath" + volumePermissions: + enabled: true +redis: + image: + pullPolicy: "Never" +``` + +Next step is to unpack chart package `splunk-connect-for-snmp-chart.tar`. 
It will result in the creation of the `splunk-connect-for-snmp` directory:
+
+```bash
+tar -xvf splunk-connect-for-snmp-chart.tar --exclude='._*'
+```
+
+Finally, run the helm install command in the directory where both `values.yaml` and the `splunk-connect-for-snmp` directory are located:
+
+```bash
+microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
+```
diff --git a/docs/offlineinstallation/offline-sck.md b/docs/offlineinstallation/offline-sck.md
new file mode 100644
index 000000000..ee5d87075
--- /dev/null
+++ b/docs/offlineinstallation/offline-sck.md
@@ -0,0 +1,93 @@
+# Splunk OpenTelemetry Collector for Kubernetes offline installation
+
+## Local machine with internet access
+
+To install the Splunk OpenTelemetry Collector offline, first download the packed chart `splunk-otel-collector-<version>.tgz`
+from the GitHub release, where `<version>` is the current OpenTelemetry release tag. This package must later be moved to the installation server.
+
+## Installation on the server
+
+Unpack the imported package with the following command:
+```bash
+tar -xvf splunk-otel-collector-<version>.tgz --exclude='._*'
+```
+
+In order to run the Splunk OpenTelemetry Collector in your environment, replace the `<>` variables according to the description presented below:
+```bash
+microk8s helm3 install sck \
+  --set="clusterName=<cluster_name>" \
+  --set="splunkPlatform.endpoint=<splunk_endpoint>" \
+  --set="splunkPlatform.insecureSkipVerify=<insecure_skip_verify>" \
+  --set="splunkPlatform.token=<splunk_token>" \
+  --set="logsEngine=otel" \
+  --set="splunkPlatform.metricsEnabled=true" \
+  --set="splunkPlatform.metricsIndex=em_metrics" \
+  --set="splunkPlatform.index=em_logs" \
+  splunk-otel-collector
+```
+
+### Variables description
+
+
+| Placeholder | Description | Example |
+|---|---|---|
+| splunk_endpoint | host address of the Splunk instance | https://endpoint.example.com:8088/services/collector |
+| insecure_skip_verify | whether insecure SSL is allowed | false |
+| splunk_token | Splunk HTTP Event Collector token | 450a69af-16a9-4f87-9628-c26f04ad3785 |
+| cluster_name | name of the cluster | my-cluster |
+
+An example of a complete command:
+```bash
+microk8s helm3 install sck \
+  --set="clusterName=my-cluster" \
+  --set="splunkPlatform.endpoint=https://endpoint.example.com/services/collector" \
+  --set="splunkPlatform.insecureSkipVerify=false" \
+  --set="splunkPlatform.token=4d22911c-18d9-4706-ae7b-dd1b976ca6f7" \
+  --set="splunkPlatform.metricsEnabled=true" \
+  --set="splunkPlatform.metricsIndex=em_metrics" \
+  --set="splunkPlatform.index=em_logs" \
+  splunk-otel-collector
+```
+
+## Install Splunk OpenTelemetry Collector with HELM for Splunk Observability for Kubernetes
+
+To run the Splunk OpenTelemetry Collector in your environment, replace the `<>` variables according to the description presented below:
+
+```bash
+microk8s helm3 install sck \
+  --set="clusterName=<cluster_name>" \
+  --set="splunkObservability.realm=<realm>" \
+  --set="splunkObservability.accessToken=<token>" \
+  --set="splunkObservability.ingestUrl=<ingest_url>" \
+  --set="splunkObservability.apiUrl=<api_url>" \
+  --set="splunkObservability.metricsEnabled=true" \
+  --set="splunkObservability.tracesEnabled=false" \
+  --set="splunkObservability.logsEnabled=false" \
+  splunk-otel-collector
+```
+
+### Variables description
+
+
+| Placeholder | Description | Example |
+|---|---|---|
+| cluster_name | name of the cluster | my_cluster |
+| realm | Realm obtained from the Splunk Observability Cloud environment | us0 |
+| token | Token obtained from the Splunk Observability Cloud environment | BCwaJ_Ands4Xh7Nrg |
+| ingest_url | Ingest URL from the Splunk Observability Cloud environment | https://ingest.<realm>.signalfx.com |
+| api_url | API URL from the Splunk Observability Cloud environment | https://api.<realm>.signalfx.com |
+
+An example of a complete command:
+```bash
+microk8s helm3 install sck \
+  --set="clusterName=my_cluster" \
+  --set="splunkObservability.realm=us0" \
+  --set="splunkObservability.accessToken=BCwaJ_Ands4Xh7Nrg" \
+  --set="splunkObservability.ingestUrl=https://ingest.<realm>.signalfx.com" \
+  --set="splunkObservability.apiUrl=https://api.<realm>.signalfx.com" \
+  --set="splunkObservability.metricsEnabled=true" \
+  --set="splunkObservability.tracesEnabled=false" \
+  --set="splunkObservability.logsEnabled=false" \
+  splunk-otel-collector
+```
\ No newline at end of file
diff --git a/docs/planning.md b/docs/planning.md
index 8c9547231..f21ebd9b9 100644
--- a/docs/planning.md
+++ b/docs/planning.md
@@ -24,32 +24,24 @@ existing firewall.
- HA Requires 3 or more instances (odd numbers) 8 core/16 thread 16 GB ram
-- 100 GB root mount
+- 50 GB root mount
- HTTP access (non-proxy) allowed for the HTTP(s) connection from SC4SNMP
to the Splunk destination.
-- Splunk Enterprise/Cloud 8.x and or Splunk Infrastructure Monitoring
+- Splunk Enterprise/Cloud 8.x or newer and/or Splunk Infrastructure Monitoring
(SignalFx)
-
-- Splunk Enterprise/Cloud specific Requirements:
-
-    * Splunk ITSI or Splunk IT Work
-    * Ability to create a HEC token
-    * Ability to create event and metrics indexes (or use
-      existing)
-
-- Splunk Infrastructure Monitoring specific requirements:
-    * Ability to create or obtain real and token
+

## Planning Infrastructure

A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with
-16 Core/32 threads x64 and 12 GB ram will be able to handle up to 1300
+16 Core/32 threads x64 and 64 GB ram will be able to handle up to 1500
SNMP TRAPs per sec.

A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with
-16 Core/32 threads x64 and 64 GB ram will be able to handle up to 1300
-SNMP GETs per sec.
+16 Core/32 threads x64 and 64 GB ram is able to handle up to 2750 SNMP varbinds per sec.
+As for the events per second visible in Splunk, remember that a single SC4SNMP event can contain more than one varbind, due to the automatic aggregation/grouping feature (varbinds describing the same thing, e.g. a network interface, are grouped into one event).
+That is why the number of events per second may vary depending on the configuration: if, say, an average of ten varbinds are grouped into each event, 2750 varbinds per second corresponds to roughly 275 events per second.

When planning infrastructure for Splunk Connect for SNMP, (SC4SNMP) note the
limitations highlighted above.
diff --git a/docs/small-environment.md b/docs/small-environment.md
new file mode 100644
index 000000000..d1ae1e01d
--- /dev/null
+++ b/docs/small-environment.md
@@ -0,0 +1,139 @@
+# Installation of SC4SNMP on a small environment
+
+SC4SNMP can be successfully installed on small environments with 2 CPUs and 4 GB of memory.
+One important thing to remember is that Splunk OpenTelemetry Collector for Kubernetes cannot be installed on such a small
+environment along with SC4SNMP. The other difference from a normal installation is that resource limits must be set for the Kubernetes pods.
Example `values.yaml` with the appropriate resources can be seen bellow: + +```yaml +splunk: + enabled: true + protocol: https + host: ###SPLUNK_HOST### + token: ###SPLUNK_TOKEN### + insecureSSL: "false" + port: "###SPLUNK_PORT###" +image: + pullPolicy: "Always" +traps: + replicaCount: 1 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 40m + memory: 256Mi + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: ###X.X.X.X### +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 40m + memory: 150Mi + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 2 + resources: + limits: + cpu: 200m + memory: 600Mi + requests: + cpu: 60m + memory: 260Mi + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + resources: + limits: + cpu: 100m + memory: 350Mi + requests: + cpu: 30m + memory: 250Mi + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 40 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: "INFO" +scheduler: + logLevel: "INFO" + resources: + limits: + cpu: 40m + memory: 260Mi + requests: + cpu: 20m + memory: 180Mi +# profiles: | +# generic_switch: +# frequency: 60 +# varBinds: +# - ['SNMPv2-MIB', 'sysDescr'] +# - ['SNMPv2-MIB', 'sysName', 0] +# - ['IF-MIB'] +# - ['TCP-MIB'] +# - ['UDP-MIB'] +poller: + # usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + # inventory: | + # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + # 10.0.0.1,,3,,sc4snmp-hlab-sha-aes,,1800,,, + # 10.0.0.199,,2c,public,,,3000,,,True + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + # sim must be enabled if you want to use signalFx + enabled: false +# signalfxToken: BCwaJ_Ands4Xh7Nrg +# signalfxRealm: us0 +mongodb: + pdb: + create: true + persistence: + storageClass: "microk8s-hostpath" + volumePermissions: + enabled: true +inventory: + resources: + limits: + cpu: 60m + memory: 300Mi + requests: + cpu: 20m +``` + +The rest of the installation is the same as in [online](gettingstarted/sc4snmp-installation.md) or +[offline](offlineinstallation/offline-sc4snmp.md) installation. \ No newline at end of file diff --git a/docs/videos/setting_analytics.mov b/docs/videos/setting_analytics.mov new file mode 100644 index 000000000..0e3177ac1 Binary files /dev/null and b/docs/videos/setting_analytics.mov differ diff --git a/entrypoint.sh b/entrypoint.sh index c1c8adc09..9cd252155 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -3,7 +3,7 @@ set -e . 
diff --git a/entrypoint.sh b/entrypoint.sh
index c1c8adc09..9cd252155 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -3,7 +3,7 @@
 set -e
 
 . /app/.venv/bin/activate
 LOG_LEVEL=${LOG_LEVEL:=INFO}
 WORKER_CONCURRENCY=${WORKER_CONCURRENCY:=4}
-wait-for-dep "${CELERY_BROKER_URL}" "${MONGO_URI}" "${MIB_INDEX}"
+wait-for-dep "${CELERY_BROKER_URL}" "${REDIS_URL}" "${MONGO_URI}" "${MIB_INDEX}"
 
 case $1 in
@@ -14,13 +14,19 @@ inventory)
 celery)
     case $2 in
         beat)
-            celery -A splunk_connect_for_snmp.poller beat -l "$LOG_LEVEL"
+            celery -A splunk_connect_for_snmp.poller beat -l "$LOG_LEVEL" --max-interval=10
            ;;
-        worker)
-            celery -A splunk_connect_for_snmp.poller worker -l "$LOG_LEVEL" --concurrency="$WORKER_CONCURRENCY" -O fair
+        worker-trap)
+            celery -A splunk_connect_for_snmp.poller worker -l "$LOG_LEVEL" -Q traps --autoscale=8,"$WORKER_CONCURRENCY"
+            ;;
+        worker-poller)
+            celery -A splunk_connect_for_snmp.poller worker -l "$LOG_LEVEL" -O fair -Q poll --autoscale=8,"$WORKER_CONCURRENCY"
+            ;;
+        worker-sender)
+            celery -A splunk_connect_for_snmp.poller worker -l "$LOG_LEVEL" -Q send --autoscale=6,"$WORKER_CONCURRENCY"
            ;;
        *)
-            celery -A splunk_connect_for_snmp.poller "${@:3}" -l "$LOG_LEVEL"
+            celery "$2"
            ;;
     esac
     ;;
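The entrypoint now starts three dedicated worker types instead of one generic worker, each bound to its own queue with `-Q` and scaled with `--autoscale=max,min` (so `--autoscale=8,"$WORKER_CONCURRENCY"` runs between `$WORKER_CONCURRENCY`, default 4, and 8 processes). As a reviewer sketch of the idea, not SC4SNMP's actual task definitions, this is how tasks end up on separate Celery queues; the task names below are hypothetical placeholders:

```python
# Illustrative only: route tasks to the traps/poll/send queues that the new
# worker-trap, worker-poller, and worker-sender entrypoint commands consume.
from celery import Celery

app = Celery("sc4snmp_sketch", broker="redis://localhost:6379/0")

app.conf.task_routes = {
    "tasks.handle_trap": {"queue": "traps"},    # picked up by `celery ... -Q traps`
    "tasks.poll_device": {"queue": "poll"},     # picked up by `celery ... -Q poll`
    "tasks.send_to_splunk": {"queue": "send"},  # picked up by `celery ... -Q send`
}
```

Separating the queues lets slow polling work pile up without delaying trap handling or sending.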
-z "${S3_PATH}" ]; then aws s3 cp /home/ubuntu/splunk-connect-for-snmp/integration_tests/result.xml s3://snmp-integration-tests/$S3_PATH/ aws s3 cp /home/ubuntu/splunk-connect-for-snmp/integration_tests/pytest.log s3://snmp-integration-tests/$S3_PATH/ -fi +fi \ No newline at end of file diff --git a/integration_tests/deploy_and_test.sh b/integration_tests/deploy_and_test.sh index 6d87a5898..b4f87ec7d 100755 --- a/integration_tests/deploy_and_test.sh +++ b/integration_tests/deploy_and_test.sh @@ -51,7 +51,7 @@ docker0_ip() { } wait_for_load_balancer_external_ip() { - while [ "$(microk8s.kubectl get service/sc4-snmp-traps -n sc4snmp | grep pending)" != "" ] ; do + while [ "$(microk8s.kubectl get service/sc4-snmp-trap -n sc4snmp | grep pending)" != "" ] ; do echo "Waiting for service/sc4-snmp-traps to have a proper external IP..." sleep 1 done @@ -98,7 +98,7 @@ run_integration_tests() { splunk_ip=$1 splunk_password=$2 - trap_external_ip=$(microk8s.kubectl -n sc4snmp get service/sc4-snmp-traps | \ + trap_external_ip=$(microk8s.kubectl -n sc4snmp get service/sc4-snmp-trap | \ tail -1 | sed -e 's/[[:space:]]\+/\t/g' | cut -f4) deploy_poetry diff --git a/integration_tests/splunk_test_utils.py b/integration_tests/splunk_test_utils.py index c7bbf613c..2fb6b132d 100644 --- a/integration_tests/splunk_test_utils.py +++ b/integration_tests/splunk_test_utils.py @@ -52,7 +52,7 @@ def splunk_single_search(service, search): inventory_template = """poller: inventory: | - address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete """ profiles_template = """scheduler: @@ -67,11 +67,16 @@ def splunk_single_search(service, search): usernameSecrets: """ +polling_secrets_template = """poller: + usernameSecrets: +""" + TEMPLATE_MAPPING = { "inventory.yaml": inventory_template, "profiles.yaml": profiles_template, "scheduler_secrets.yaml": poller_secrets_template, "traps_secrets.yaml": traps_secrets_template, + "polling_secrets.yaml": polling_secrets_template, } @@ -125,21 +130,28 @@ def upgrade_helm(yaml_files): ) -def create_v3_secrets(): +def create_v3_secrets( + secret_name="secretv4", + user_name="snmp-poller", + auth_key="PASSWORD1", + priv_key="PASSWORD1", + auth_protocol="SHA", + priv_protocol="AES", +): os.system( - "sudo microk8s kubectl create -n sc4snmp secret generic secretv4 \ - --from-literal=userName=snmp-poller \ - --from-literal=authKey=PASSWORD1 \ - --from-literal=privKey=PASSWORD1 \ - --from-literal=authProtocol=SHA \ - --from-literal=privProtocol=AES \ + f"sudo microk8s kubectl create -n sc4snmp secret generic {secret_name} \ + --from-literal=userName={user_name} \ + --from-literal=authKey={auth_key} \ + --from-literal=privKey={priv_key} \ + --from-literal=authProtocol={auth_protocol} \ + --from-literal=privProtocol={priv_protocol} \ --from-literal=securityEngineId=8000000903000A397056B8AC" ) def wait_for_pod_initialization(): script_body = f""" - while [ "$(sudo microk8s kubectl get pod -n sc4snmp | grep traps | grep Running | wc -l)" != "1" ] ; do + while [ "$(sudo microk8s kubectl get pod -n sc4snmp | grep "worker-trap" | grep Running | wc -l)" != "1" ] ; do echo "Waiting for POD initialization..." 
diff --git a/integration_tests/test_poller_integration.py b/integration_tests/test_poller_integration.py
index b6c554eb6..149788005 100644
--- a/integration_tests/test_poller_integration.py
+++ b/integration_tests/test_poller_integration.py
@@ -20,7 +20,6 @@ from ruamel.yaml.scalarstring import SingleQuotedScalarString as sq
 
 from integration_tests.splunk_test_utils import (
-    create_v3_secrets,
     splunk_single_search,
     update_file,
     update_profiles,
@@ -79,8 +78,6 @@ def setup_profile(request):
         }
     }
     update_profiles(profile)
-    upgrade_helm(["profiles.yaml"])
-    time.sleep(60)
     update_file(
         [f"{trap_external_ip},,2c,public,,,600,generic_switch,,"], "inventory.yaml"
     )
@@ -123,8 +120,8 @@ def setup_profiles(request):
         },
     }
     update_profiles(profile)
-    upgrade_helm(["profiles.yaml"])
-    time.sleep(60)
+    # upgrade_helm(["profiles.yaml"])
+    # time.sleep(60)
     update_file(
         [f"{trap_external_ip},,2c,public,,,600,new_profile;generic_switch,,"],
         "inventory.yaml",
@@ -207,8 +204,8 @@ def setup_smart_profiles(request):
         }
     }
     update_profiles(profile)
-    upgrade_helm(["inventory.yaml", "profiles.yaml"])
-    time.sleep(60)
+    # upgrade_helm(["inventory.yaml", "profiles.yaml"])
+    # time.sleep(60)
     update_file([f"{trap_external_ip},,2c,public,,,600,,t,"], "inventory.yaml")
     upgrade_helm(["inventory.yaml", "profiles.yaml"])
     time.sleep(30)
@@ -267,8 +264,6 @@ def setup_modify_profile(request):
         },
     }
     update_profiles(profile)
-    upgrade_helm(["inventory.yaml", "profiles.yaml"])
-    time.sleep(60)
     update_file(
         [f"{trap_external_ip},,2c,public,,,600,test_modify,f,"], "inventory.yaml"
     )
@@ -382,8 +377,6 @@ def setup_small_walk(request):
         },
     }
     update_profiles(profile)
-    upgrade_helm(["inventory.yaml", "profiles.yaml"])
-    time.sleep(60)
     update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,"], "inventory.yaml")
     upgrade_helm(["inventory.yaml", "profiles.yaml"])
     time.sleep(30)
@@ -401,7 +394,7 @@ def test_check_if_walk_scope_was_smaller(self, setup_splunk):
         """| mpreview index=netmetrics earliest=-20s | search "TCP-MIB" """
     )
     result_count, metric_count = run_retried_single_search(
-        setup_splunk, search_string, 2
+        setup_splunk, search_string, 1
     )
     assert result_count == 0
     assert metric_count == 0
@@ -415,23 +408,33 @@ def test_check_if_walk_scope_was_smaller(self, setup_splunk):
     assert metric_count > 0
 
 
-class TestPoolingV3:
-    def test_pooling_v3(self, request, setup_splunk):
-        trap_external_ip = request.config.getoption("trap_external_ip")
-        logger.info("Integration test for v3 version of SNMP")
-        create_v3_secrets()
-        update_file(["- secretv4"], "scheduler_secrets.yaml")
-        update_file(
-            [f"{trap_external_ip},,3,snmp-poller,secretv4,,600,,,"], "inventory.yaml"
-        )
-        upgrade_helm(["inventory.yaml", "scheduler_secrets.yaml"])
-        time.sleep(40)
-        search_string = """| mpreview index=netmetrics earliest=-20s"""
+@pytest.fixture()
+def setup_v3_connection(request):
+    trap_external_ip = request.config.getoption("trap_external_ip")
+    time.sleep(60)
+    update_file(
+        [f"{trap_external_ip},1161,3,,sv3poller,,20,v3profile,f,"], "inventory.yaml"
+    )
+    upgrade_helm(["inventory.yaml"])
+    time.sleep(30)
+    yield
+    update_file(
+        [f"{trap_external_ip},1161,3,,sv3poller,,20,v3profile,f,t"], "inventory.yaml"
+    )
+    upgrade_helm(["inventory.yaml"])
+    time.sleep(20)
+
+
+@pytest.mark.usefixtures("setup_v3_connection")
+class TestSNMPv3Connection:
+    def test_snmpv3_walk(self, setup_splunk):
+        time.sleep(100)
+        search_string = """| mpreview index=netmetrics | search profiles=v3profile"""
         result_count, metric_count = run_retried_single_search(
             setup_splunk, search_string, 2
         )
-        assert result_count == 0
-        assert metric_count == 0
+        assert result_count > 0
+        assert metric_count > 0
 
 
 def run_retried_single_search(setup_splunk, search_string, retries):
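For readers following the new SNMPv3 test (reviewer note, not part of the patch): the fixture drives everything through inventory rows whose columns follow the renamed header `address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`; setup writes a row with an empty `delete` field and teardown rewrites it with `delete=t`. A small sketch of the two rows:

```python
# Builds the inventory rows used by setup_v3_connection (this helper name is
# hypothetical; the fixture inlines the f-strings directly).
def v3_inventory_row(address: str, delete: bool = False) -> str:
    # Port 1161 is the snmpd agent set up in automatic_setup.sh; 20 s walk
    # interval; the v3profile defined in integration_tests/values.yaml.
    return f"{address},1161,3,,sv3poller,,20,v3profile,f,{'t' if delete else ''}"

assert v3_inventory_row("10.0.0.1") == "10.0.0.1,1161,3,,sv3poller,,20,v3profile,f,"
assert v3_inventory_row("10.0.0.1", delete=True).endswith(",t")
```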
diff --git a/integration_tests/values.yaml b/integration_tests/values.yaml
index 10bebcb2c..8f92d8a6c 100644
--- a/integration_tests/values.yaml
+++ b/integration_tests/values.yaml
@@ -22,9 +22,23 @@ traps:
   #loadBalancerIP: The IP address in the metallb pool
   loadBalancerIP: ###LOAD_BALANCER_ID###
 worker:
+  poller:
+    replicaCount: 1
+    #changed replicaCount from 4 to 1
+    concurrency: 4
+    prefetch: 1
+  trap:
+    autoscaling:
+      enabled: false
+    replicaCount: 1
+    concurrency: 8
+    prefetch: 60
+  sender:
+    replicaCount: 1
+    concurrency: 4
+    prefetch: 60
   profilesReloadDelay: 1
   # replicas: Number of replicas for worker container should be two or more
-  replicaCount: 1
   # udpConnectionTimeout: timeout in seconds for SNMP operations
   #udpConnectionTimeout: 5
   logLevel: "DEBUG"
@@ -33,6 +47,13 @@ scheduler:
   customTranslations:
     IP-MIB:
       icmpOutEchoReps: myCustomName1
+  profiles: |
+    v3profile:
+      frequency: 5
+      varBinds:
+        - ['IF-MIB']
+        - ['TCP-MIB']
+        - ['UDP-MIB']
 # profiles: |
 #   generic_switch:
 #     frequency: 60
@@ -42,11 +63,12 @@
 #     - ['IF-MIB']
 #     - ['TCP-MIB']
 poller:
-  # usernameSecrets:
+  usernameSecrets:
+    - sv3poller
   #   - sc4snmp-hlab-sha-aes
   #   - sc4snmp-hlab-sha-des
   inventory: |
-    address,port,version,community,secret,securityEngine,walk_interval,profiles,SmartProfiles,delete
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
     ###LOAD_BALANCER_ID###,,2c,public,,,600,,,
 sim:
   # sim must be enabled if you want to use SignalFx
@@ -60,12 +82,7 @@ mongodb:
     storageClass: "microk8s-hostpath"
   volumePermissions:
     enabled: true
-rabbitmq:
-  pdb:
-    create: true
-  replicaCount: 1
-  persistence:
-    enabled: true
-    storageClass: "microk8s-hostpath"
-  volumePermissions:
-    enabled: true
\ No newline at end of file
+redis:
+  architecture: standalone
+  auth:
+    enabled: false
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index 532cba09d..a7f4c72c3 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -12,6 +12,10 @@ markdown_extensions:
   - sane_lists
   - codehilite
 
+plugins:
+  - mkdocs-video:
+      is_video: True
+
 theme:
   name: "material"
   palette:
@@ -29,13 +33,14 @@ nav:
       - Install SC4SNMP: "gettingstarted/sc4snmp-installation.md"
       - High Availability: ha.md
   - Configuration:
+      - Configuring Profiles: "configuration/configuring-profiles.md"
       - Deployment: "configuration/deployment-configuration.md"
       - Poller: "configuration/poller-configuration.md"
       - Trap: "configuration/trap-configuration.md"
       - Scheduler: "configuration/scheduler-configuration.md"
       - Worker: "configuration/worker-configuration.md"
       - Mongo DB: "configuration/mongo-configuration.md"
-      - RabbitMQ: "configuration/rabbitmq-configuration.md"
+      - Redis: "configuration/redis-configuration.md"
       - SNMPv3 configuration: "configuration/snmpv3-configuration.md"
       - Splunk Infrastructure Monitoring: "configuration/sim-configuration.md"
   - Planning: "planning.md"
@@ -44,4 +49,8 @@ nav:
   - Upgrade SC4SNMP: "upgrade.md"
   - Troubleshooting: "bestpractices.md"
   - Releases: "releases.md"
+  - Offline Installation:
+      - Install Splunk OpenTelemetry Collector for Kubernetes: "offlineinstallation/offline-sck.md"
+      - Install SC4SNMP: "offlineinstallation/offline-sc4snmp.md"
+  - SC4SNMP lightweight installation: "small-environment.md"
diff --git a/poetry.lock b/poetry.lock
index bbfdc4720..240a7eb14 100644
--- a/poetry.lock
+++
b/poetry.lock @@ -1,13 +1,13 @@ [[package]] name = "amqp" -version = "5.0.9" +version = "5.1.1" description = "Low-level AMQP client for Python (fork of amqplib)." category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -vine = "5.0.0" +vine = ">=5.0.0" [[package]] name = "appdirs" @@ -17,6 +17,14 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "async-timeout" +version = "4.0.2" +description = "Timeout context manager for asyncio programs" +category = "main" +optional = false +python-versions = ">=3.6" + [[package]] name = "atomicwrites" version = "1.4.0" @@ -47,14 +55,6 @@ category = "main" optional = false python-versions = "*" -[[package]] -name = "blinker" -version = "1.4" -description = "Fast, simple object-to-object and broadcast signaling" -category = "main" -optional = false -python-versions = "*" - [[package]] name = "cattrs" version = "1.10.0" @@ -68,11 +68,11 @@ attrs = ">=20" [[package]] name = "celery" -version = "5.2.3" +version = "5.2.7" description = "Distributed Task Queue." category = "main" optional = false -python-versions = ">=3.7," +python-versions = ">=3.7" [package.dependencies] billiard = ">=3.6.4.0,<4.0" @@ -119,8 +119,8 @@ zookeeper = ["kazoo (>=1.3.1)"] zstd = ["zstandard"] [[package]] -name = "celerybeat-mongo" -version = "0.2.2" +name = "celery-redbeat" +version = "2.0.0" description = "" category = "main" optional = false @@ -128,28 +128,28 @@ python-versions = "*" develop = false [package.dependencies] -blinker = "*" -celery = "*" -mongoengine = "*" -pymongo = "*" +celery = ">=4.2" +python-dateutil = "*" +redis = ">=3.2" +tenacity = "*" [package.source] type = "git" -url = "https://github.com/splunk/celerybeat-mongo" +url = "https://github.com/splunk/redbeat" reference = "main" -resolved_reference = "68b7afe958ccb59a1a1095e2a51c1fd26f1f7ecb" +resolved_reference = "0ac78722b100d3f22e14d28d0e04ce154f56d1bd" [[package]] name = "certifi" -version = "2021.10.8" +version = "2022.6.15" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "charset-normalizer" -version = "2.0.10" +version = "2.0.12" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "main" optional = false @@ -160,11 +160,11 @@ unicode_backport = ["unicodedata2"] [[package]] name = "click" -version = "8.0.3" +version = "8.1.3" description = "Composable command line interface toolkit" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -209,7 +209,7 @@ six = "*" [[package]] name = "colorama" -version = "0.4.4" +version = "0.4.5" description = "Cross-platform colored terminal text." 
category = "main" optional = false @@ -217,14 +217,14 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "coverage" -version = "6.3.2" +version = "6.4.1" description = "Code coverage measurement for Python" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] -tomli = {version = "*", optional = true, markers = "extra == \"toml\""} +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] @@ -255,26 +255,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" DNSSEC = ["pycryptodome", "ecdsa (>=0.13)"] IDNA = ["idna (>=2.1)"] -[[package]] -name = "flower" -version = "1.0.0" -description = "Celery Flower" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -celery = ">=5.0.5" -humanize = "*" -prometheus-client = ">=0.8.0" -pytz = "*" -tornado = ">=5.0.0,<7.0.0" - [[package]] name = "ghp-import" -version = "2.0.2" +version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." -category = "dev" +category = "main" optional = false python-versions = "*" @@ -284,17 +269,6 @@ python-dateutil = ">=2.8.1" [package.extras] dev = ["twine", "markdown", "flake8", "wheel"] -[[package]] -name = "humanize" -version = "3.13.1" -description = "Python humanize utilities" -category = "main" -optional = false -python-versions = ">=3.6" - -[package.extras] -tests = ["freezegun", "pytest", "pytest-cov"] - [[package]] name = "idna" version = "3.3" @@ -305,9 +279,9 @@ python-versions = ">=3.5" [[package]] name = "importlib-metadata" -version = "4.10.1" +version = "4.11.4" description = "Read metadata from Python packages" -category = "dev" +category = "main" optional = false python-versions = ">=3.7" @@ -315,9 +289,9 @@ python-versions = ">=3.7" zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] [[package]] name = "iniconfig" @@ -329,11 +303,11 @@ python-versions = "*" [[package]] name = "jinja2" -version = "3.0.3" +version = "3.1.2" description = "A very fast and expressive template engine." -category = "dev" +category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] MarkupSafe = ">=2.0" @@ -351,7 +325,7 @@ python-versions = "*" [[package]] name = "kombu" -version = "5.2.3" +version = "5.2.4" description = "Messaging library for Python." category = "main" optional = false @@ -379,9 +353,9 @@ zookeeper = ["kazoo (>=1.3.1)"] [[package]] name = "markdown" -version = "3.3.6" +version = "3.3.7" description = "Python implementation of Markdown." 
-category = "dev" +category = "main" optional = false python-versions = ">=3.6" @@ -393,17 +367,17 @@ testing = ["coverage", "pyyaml"] [[package]] name = "markupsafe" -version = "2.0.1" +version = "2.1.1" description = "Safely add untrusted strings to HTML/XML markup." -category = "dev" +category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "mergedeep" version = "1.3.4" description = "A deep merge function for 🐍." -category = "dev" +category = "main" optional = false python-versions = ">=3.6" @@ -427,17 +401,17 @@ test = ["coverage", "flake8 (>=3.0)", "shtab"] [[package]] name = "mkdocs" -version = "1.2.3" +version = "1.3.0" description = "Project documentation with Markdown." -category = "dev" +category = "main" optional = false python-versions = ">=3.6" [package.dependencies] click = ">=3.3" ghp-import = ">=1.0" -importlib-metadata = ">=3.10" -Jinja2 = ">=2.10.1" +importlib-metadata = ">=4.3" +Jinja2 = ">=2.10.2" Markdown = ">=3.2.1" mergedeep = ">=1.3.4" packaging = ">=20.5" @@ -450,19 +424,19 @@ i18n = ["babel (>=2.9.0)"] [[package]] name = "mkdocs-material" -version = "8.2.6" -description = "A Material Design theme for MkDocs" +version = "8.3.7" +description = "Documentation that simply works" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -jinja2 = ">=2.11.1" +jinja2 = ">=3.0.2" markdown = ">=3.2" -mkdocs = ">=1.2.3" -mkdocs-material-extensions = ">=1.0" -pygments = ">=2.10" -pymdown-extensions = ">=9.0" +mkdocs = ">=1.3.0" +mkdocs-material-extensions = ">=1.0.3" +pygments = ">=2.12" +pymdown-extensions = ">=9.4" [[package]] name = "mkdocs-material-extensions" @@ -472,6 +446,17 @@ category = "dev" optional = false python-versions = ">=3.6" +[[package]] +name = "mkdocs-video" +version = "1.3.0" +description = "" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +mkdocs = ">=1.1.0,<2" + [[package]] name = "mongoengine" version = "0.24.1" @@ -507,7 +492,7 @@ Deprecated = ">=1.2.6" [[package]] name = "opentelemetry-exporter-jaeger-thrift" -version = "1.10.0" +version = "1.11.0" description = "Jaeger Thrift Exporter for OpenTelemetry" category = "main" optional = false @@ -586,7 +571,7 @@ python-versions = ">=3.6" name = "packaging" version = "21.3" description = "Core utilities for Python packages" -category = "dev" +category = "main" optional = false python-versions = ">=3.6" @@ -595,7 +580,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "pika" -version = "1.2.0" +version = "1.2.1" description = "Pika Python AMQP Client Library" category = "main" optional = false @@ -626,20 +611,9 @@ category = "main" optional = false python-versions = "*" -[[package]] -name = "prometheus-client" -version = "0.12.0" -description = "Python client for the Prometheus monitoring system." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[package.extras] -twisted = ["twisted"] - [[package]] name = "prompt-toolkit" -version = "3.0.24" +version = "3.0.29" description = "Library for building powerful interactive command lines in Python" category = "main" optional = false @@ -658,7 +632,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "pycryptodomex" -version = "3.13.0" +version = "3.14.1" description = "Cryptographic library for Python" category = "main" optional = false @@ -666,8 +640,8 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "pydantic" -version = "1.9.0" -description = "Data validation and settings management using python 3.6 type hinting" +version = "1.9.1" +description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.6.1" @@ -681,22 +655,22 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygments" -version = "2.11.2" +version = "2.12.0" description = "Pygments is a syntax highlighting package written in Python." category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [[package]] name = "pymdown-extensions" -version = "9.1" +version = "9.5" description = "Extension pack for Python Markdown." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -Markdown = ">=3.2" +markdown = ">=3.2" [[package]] name = "pymongo" @@ -721,14 +695,14 @@ zstd = ["zstandard"] [[package]] name = "pyparsing" -version = "3.0.7" -description = "Python parsing module" -category = "dev" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.8" [package.extras] -diagrams = ["jinja2", "railroad-diagrams"] +diagrams = ["railroad-diagrams", "jinja2"] [[package]] name = "pyrate-limiter" @@ -752,7 +726,7 @@ python-versions = ">=3.8,<4.0" [[package]] name = "pysnmp-pysmi" -version = "1.1.8" +version = "1.1.10" description = "" category = "main" optional = false @@ -764,7 +738,7 @@ requests = ">=2.26.0,<3.0.0" [[package]] name = "pysnmplib" -version = "5.0.10" +version = "5.0.17" description = "" category = "main" optional = false @@ -777,7 +751,7 @@ pysnmp-pysmi = ">=1.0.4,<2.0.0" [[package]] name = "pytest" -version = "7.1.1" +version = "7.1.2" description = "pytest: simple powerful testing with Python" category = "dev" optional = false @@ -815,7 +789,7 @@ testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtuale name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "dev" +category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" @@ -835,7 +809,7 @@ cli = ["click (>=5.0)"] [[package]] name = "pytz" -version = "2021.3" +version = "2022.1" description = "World timezone definitions, modern and historical" category = "main" optional = false @@ -853,34 +827,51 @@ python-versions = ">=3.6" name = "pyyaml-env-tag" version = "0.1" description = "A custom YAML tag for referencing environment variables in YAML files. 
" -category = "dev" +category = "main" optional = false python-versions = ">=3.6" [package.dependencies] pyyaml = "*" +[[package]] +name = "redis" +version = "4.3.3" +description = "Python client for Redis database and key-value store" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +async-timeout = ">=4.0.2" +deprecated = ">=1.2.3" +packaging = ">=20.4" + +[package.extras] +hiredis = ["hiredis (>=1.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] + [[package]] name = "requests" -version = "2.27.1" +version = "2.28.0" description = "Python HTTP for Humans." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.7, <4" [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} -idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +charset-normalizer = ">=2.0.0,<2.1.0" +idna = ">=2.5,<4" urllib3 = ">=1.21.1,<1.27" [package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] [[package]] name = "requests-cache" -version = "0.9.3" +version = "0.9.4" description = "A transparent persistent cache for the requests library" category = "main" optional = false @@ -896,9 +887,9 @@ urllib3 = ">=1.25.5,<2.0.0" [package.extras] dynamodb = ["boto3 (>=1.15,<2.0)", "botocore (>=1.18,<2.0)"] -all = ["boto3 (>=1.15,<2.0)", "botocore (>=1.18,<2.0)", "pymongo (>=3,<5)", "redis (>=3,<5)", "itsdangerous (>=2.0,<3.0)", "pyyaml (>=5.4)", "ujson (>=4.0)"] -mongodb = ["pymongo (>=3,<5)"] -redis = ["redis (>=3,<5)"] +all = ["boto3 (>=1.15,<2.0)", "botocore (>=1.18,<2.0)", "pymongo (>=3)", "redis (>=3)", "itsdangerous (>=2.0,<3.0)", "pyyaml (>=5.4)", "ujson (>=4.0)"] +mongodb = ["pymongo (>=3)"] +redis = ["redis (>=3)"] bson = ["bson (>=0.5)"] security = ["itsdangerous (>=2.0,<3.0)"] yaml = ["pyyaml (>=5.4)"] @@ -959,9 +950,20 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +[[package]] +name = "tenacity" +version = "8.0.1" +description = "Retry code until it succeeds" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + [[package]] name = "thrift" -version = "0.15.0" +version = "0.16.0" description = "Python bindings for the Apache Thrift RPC system" category = "main" optional = false @@ -977,27 +979,19 @@ twisted = ["twisted"] [[package]] name = "tomli" -version = "2.0.0" +version = "2.0.1" description = "A lil' TOML parser" category = "dev" optional = false python-versions = ">=3.7" -[[package]] -name = "tornado" -version = "6.1" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-category = "main" -optional = false -python-versions = ">= 3.5" - [[package]] name = "typing-extensions" -version = "4.0.1" -description = "Backported and Experimental Type Hints for Python 3.6+" +version = "4.2.0" +description = "Backported and Experimental Type Hints for Python 3.7+" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "url-normalize" @@ -1012,14 +1006,14 @@ six = "*" [[package]] name = "urllib3" -version = "1.26.8" +version = "1.26.9" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" [package.extras] -brotli = ["brotlipy (>=0.6.0)"] +brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"] secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] @@ -1052,6 +1046,7 @@ python-versions = ">=3.6" develop = false [package.dependencies] +redis = {version = "*", optional = true, markers = "extra == \"redis\""} requests = ">=2.21.0" [package.extras] @@ -1066,15 +1061,15 @@ websockets = ["websocket-server"] [package.source] type = "git" -url = "https://github.com/rfaircloth-splunk/wait-for-dep.git" +url = "https://github.com/omrozowicz-splunk/wait-for-dep.git" reference = "master" -resolved_reference = "cfa6b17c11017bf96b70f7812874b3611f1bd4e4" +resolved_reference = "b880ba68ba0da8ee5671b48dd58788002209946b" [[package]] name = "watchdog" -version = "2.1.6" +version = "2.1.9" description = "Filesystem events monitoring" -category = "dev" +category = "main" optional = false python-versions = ">=3.6" @@ -1091,7 +1086,7 @@ python-versions = "*" [[package]] name = "wrapt" -version = "1.13.3" +version = "1.14.1" description = "Module for decorators, wrappers and monkey patching." 
category = "main" optional = false @@ -1099,30 +1094,34 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [[package]] name = "zipp" -version = "3.7.0" +version = "3.8.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "dev" +category = "main" optional = false python-versions = ">=3.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "b8aa6d79d4a4e18680725ae53bb7d7c5f02081765e5816a91c728071870df45d" +content-hash = "1e0066fdc696c5f6341165a37225d6525a9ce91320fd492aabc140dd9c996180" [metadata.files] amqp = [ - {file = "amqp-5.0.9-py3-none-any.whl", hash = "sha256:9cd81f7b023fc04bbb108718fbac674f06901b77bfcdce85b10e2a5d0ee91be5"}, - {file = "amqp-5.0.9.tar.gz", hash = "sha256:1e5f707424e544078ca196e72ae6a14887ce74e02bd126be54b7c03c971bef18"}, + {file = "amqp-5.1.1-py3-none-any.whl", hash = "sha256:6f0956d2c23d8fa6e7691934d8c3930eadb44972cbbd1a7ae3a520f735d43359"}, + {file = "amqp-5.1.1.tar.gz", hash = "sha256:2c1b13fecc0893e946c65cbd5f36427861cffa4ea2201d8f6fca22e2a373b5e2"}, ] appdirs = [ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, ] +async-timeout = [ + {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, + {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, +] atomicwrites = [ {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, @@ -1135,29 +1134,26 @@ billiard = [ {file = "billiard-3.6.4.0-py3-none-any.whl", hash = "sha256:87103ea78fa6ab4d5c751c4909bcff74617d985de7fa8b672cf8618afd5a875b"}, {file = "billiard-3.6.4.0.tar.gz", hash = "sha256:299de5a8da28a783d51b197d496bef4f1595dd023a93a4f59dde1886ae905547"}, ] -blinker = [ - {file = "blinker-1.4.tar.gz", hash = "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"}, -] cattrs = [ {file = "cattrs-1.10.0-py3-none-any.whl", hash = "sha256:35dd9063244263e63bd0bd24ea61e3015b00272cead084b2c40d788b0f857c46"}, {file = "cattrs-1.10.0.tar.gz", hash = "sha256:211800f725cdecedcbcf4c753bbd22d248312b37d130f06045434acb7d9b34e1"}, ] celery = [ - {file = "celery-5.2.3-py3-none-any.whl", hash = "sha256:8aacd02fc23a02760686d63dde1eb0daa9f594e735e73ea8fb15c2ff15cb608c"}, - {file = "celery-5.2.3.tar.gz", hash = "sha256:e2cd41667ad97d4f6a2f4672d1c6a6ebada194c619253058b5f23704aaadaa82"}, + {file = "celery-5.2.7-py3-none-any.whl", hash = "sha256:138420c020cd58d6707e6257b6beda91fd39af7afde5d36c6334d175302c0e14"}, + {file = "celery-5.2.7.tar.gz", hash = 
"sha256:fafbd82934d30f8a004f81e8f7a062e31413a23d444be8ee3326553915958c6d"}, ] -celerybeat-mongo = [] +celery-redbeat = [] certifi = [ - {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, - {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, + {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, + {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, ] charset-normalizer = [ - {file = "charset-normalizer-2.0.10.tar.gz", hash = "sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd"}, - {file = "charset_normalizer-2.0.10-py3-none-any.whl", hash = "sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455"}, + {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, + {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, ] click = [ - {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, - {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, ] click-didyoumean = [ {file = "click-didyoumean-0.3.0.tar.gz", hash = "sha256:f184f0d851d96b6d29297354ed981b7dd71df7ff500d82fa6d11f0856bee8035"}, @@ -1172,51 +1168,51 @@ click-repl = [ {file = "click_repl-0.2.0-py3-none-any.whl", hash = "sha256:94b3fbbc9406a236f176e0506524b2937e4b23b6f4c0c0b2a0a83f8a64e9194b"}, ] colorama = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, + {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, + {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, ] coverage = [ - {file = "coverage-6.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9b27d894748475fa858f9597c0ee1d4829f44683f3813633aaf94b19cb5453cf"}, - {file = "coverage-6.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37d1141ad6b2466a7b53a22e08fe76994c2d35a5b6b469590424a9953155afac"}, - {file = "coverage-6.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9987b0354b06d4df0f4d3e0ec1ae76d7ce7cbca9a2f98c25041eb79eec766f1"}, - {file = "coverage-6.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26e2deacd414fc2f97dd9f7676ee3eaecd299ca751412d89f40bc01557a6b1b4"}, - {file = "coverage-6.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dd8bafa458b5c7d061540f1ee9f18025a68e2d8471b3e858a9dad47c8d41903"}, - {file = "coverage-6.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:46191097ebc381fbf89bdce207a6c107ac4ec0890d8d20f3360345ff5976155c"}, - {file = "coverage-6.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6f89d05e028d274ce4fa1a86887b071ae1755082ef94a6740238cd7a8178804f"}, - {file = "coverage-6.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:58303469e9a272b4abdb9e302a780072c0633cdcc0165db7eec0f9e32f901e05"}, - {file = "coverage-6.3.2-cp310-cp310-win32.whl", hash = "sha256:2fea046bfb455510e05be95e879f0e768d45c10c11509e20e06d8fcaa31d9e39"}, - {file = "coverage-6.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:a2a8b8bcc399edb4347a5ca8b9b87e7524c0967b335fbb08a83c8421489ddee1"}, - {file = "coverage-6.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f1555ea6d6da108e1999b2463ea1003fe03f29213e459145e70edbaf3e004aaa"}, - {file = "coverage-6.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5f4e1edcf57ce94e5475fe09e5afa3e3145081318e5fd1a43a6b4539a97e518"}, - {file = "coverage-6.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a15dc0a14008f1da3d1ebd44bdda3e357dbabdf5a0b5034d38fcde0b5c234b7"}, - {file = "coverage-6.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21b7745788866028adeb1e0eca3bf1101109e2dc58456cb49d2d9b99a8c516e6"}, - {file = "coverage-6.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8ce257cac556cb03be4a248d92ed36904a59a4a5ff55a994e92214cde15c5bad"}, - {file = "coverage-6.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b0be84e5a6209858a1d3e8d1806c46214e867ce1b0fd32e4ea03f4bd8b2e3359"}, - {file = "coverage-6.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:acf53bc2cf7282ab9b8ba346746afe703474004d9e566ad164c91a7a59f188a4"}, - {file = "coverage-6.3.2-cp37-cp37m-win32.whl", hash = "sha256:8bdde1177f2311ee552f47ae6e5aa7750c0e3291ca6b75f71f7ffe1f1dab3dca"}, - {file = "coverage-6.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b31651d018b23ec463e95cf10070d0b2c548aa950a03d0b559eaa11c7e5a6fa3"}, - {file = "coverage-6.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:07e6db90cd9686c767dcc593dff16c8c09f9814f5e9c51034066cad3373b914d"}, - {file = "coverage-6.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c6dbb42f3ad25760010c45191e9757e7dce981cbfb90e42feef301d71540059"}, - {file = "coverage-6.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c76aeef1b95aff3905fb2ae2d96e319caca5b76fa41d3470b19d4e4a3a313512"}, - {file = "coverage-6.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cf5cfcb1521dc3255d845d9dca3ff204b3229401994ef8d1984b32746bb45ca"}, - {file = "coverage-6.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fbbdc8d55990eac1b0919ca69eb5a988a802b854488c34b8f37f3e2025fa90d"}, - {file = "coverage-6.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ec6bc7fe73a938933d4178c9b23c4e0568e43e220aef9472c4f6044bfc6dd0f0"}, - {file = "coverage-6.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9baff2a45ae1f17c8078452e9e5962e518eab705e50a0aa8083733ea7d45f3a6"}, - {file = "coverage-6.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd9e830e9d8d89b20ab1e5af09b32d33e1a08ef4c4e14411e559556fd788e6b2"}, - {file = "coverage-6.3.2-cp38-cp38-win32.whl", hash = "sha256:f7331dbf301b7289013175087636bbaf5b2405e57259dd2c42fdcc9fcc47325e"}, - {file = "coverage-6.3.2-cp38-cp38-win_amd64.whl", 
hash = "sha256:68353fe7cdf91f109fc7d474461b46e7f1f14e533e911a2a2cbb8b0fc8613cf1"}, - {file = "coverage-6.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b78e5afb39941572209f71866aa0b206c12f0109835aa0d601e41552f9b3e620"}, - {file = "coverage-6.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e21876082ed887baed0146fe222f861b5815455ada3b33b890f4105d806128d"}, - {file = "coverage-6.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34626a7eee2a3da12af0507780bb51eb52dca0e1751fd1471d0810539cefb536"}, - {file = "coverage-6.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ebf730d2381158ecf3dfd4453fbca0613e16eaa547b4170e2450c9707665ce7"}, - {file = "coverage-6.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd6fe30bd519694b356cbfcaca9bd5c1737cddd20778c6a581ae20dc8c04def2"}, - {file = "coverage-6.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:96f8a1cb43ca1422f36492bebe63312d396491a9165ed3b9231e778d43a7fca4"}, - {file = "coverage-6.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:dd035edafefee4d573140a76fdc785dc38829fe5a455c4bb12bac8c20cfc3d69"}, - {file = "coverage-6.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ca5aeb4344b30d0bec47481536b8ba1181d50dbe783b0e4ad03c95dc1296684"}, - {file = "coverage-6.3.2-cp39-cp39-win32.whl", hash = "sha256:f5fa5803f47e095d7ad8443d28b01d48c0359484fec1b9d8606d0e3282084bc4"}, - {file = "coverage-6.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:9548f10d8be799551eb3a9c74bbf2b4934ddb330e08a73320123c07f95cc2d92"}, - {file = "coverage-6.3.2-pp36.pp37.pp38-none-any.whl", hash = "sha256:18d520c6860515a771708937d2f78f63cc47ab3b80cb78e86573b0a760161faf"}, - {file = "coverage-6.3.2.tar.gz", hash = "sha256:03e2a7826086b91ef345ff18742ee9fc47a6839ccd517061ef8fa1976e652ce9"}, + {file = "coverage-6.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f1d5aa2703e1dab4ae6cf416eb0095304f49d004c39e9db1d86f57924f43006b"}, + {file = "coverage-6.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4ce1b258493cbf8aec43e9b50d89982346b98e9ffdfaae8ae5793bc112fb0068"}, + {file = "coverage-6.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c4e737f60c6936460c5be330d296dd5b48b3963f48634c53b3f7deb0f34ec4"}, + {file = "coverage-6.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84e65ef149028516c6d64461b95a8dbcfce95cfd5b9eb634320596173332ea84"}, + {file = "coverage-6.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f69718750eaae75efe506406c490d6fc5a6161d047206cc63ce25527e8a3adad"}, + {file = "coverage-6.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e57816f8ffe46b1df8f12e1b348f06d164fd5219beba7d9433ba79608ef011cc"}, + {file = "coverage-6.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:01c5615d13f3dd3aa8543afc069e5319cfa0c7d712f6e04b920431e5c564a749"}, + {file = "coverage-6.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75ab269400706fab15981fd4bd5080c56bd5cc07c3bccb86aab5e1d5a88dc8f4"}, + {file = "coverage-6.4.1-cp310-cp310-win32.whl", hash = "sha256:a7f3049243783df2e6cc6deafc49ea123522b59f464831476d3d1448e30d72df"}, + {file = "coverage-6.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:ee2ddcac99b2d2aec413e36d7a429ae9ebcadf912946b13ffa88e7d4c9b712d6"}, + {file = "coverage-6.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", 
hash = "sha256:fb73e0011b8793c053bfa85e53129ba5f0250fdc0392c1591fd35d915ec75c46"}, + {file = "coverage-6.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:106c16dfe494de3193ec55cac9640dd039b66e196e4641fa8ac396181578b982"}, + {file = "coverage-6.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87f4f3df85aa39da00fd3ec4b5abeb7407e82b68c7c5ad181308b0e2526da5d4"}, + {file = "coverage-6.4.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:961e2fb0680b4f5ad63234e0bf55dfb90d302740ae9c7ed0120677a94a1590cb"}, + {file = "coverage-6.4.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cec3a0f75c8f1031825e19cd86ee787e87cf03e4fd2865c79c057092e69e3a3b"}, + {file = "coverage-6.4.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:129cd05ba6f0d08a766d942a9ed4b29283aff7b2cccf5b7ce279d50796860bb3"}, + {file = "coverage-6.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bf5601c33213d3cb19d17a796f8a14a9eaa5e87629a53979a5981e3e3ae166f6"}, + {file = "coverage-6.4.1-cp37-cp37m-win32.whl", hash = "sha256:269eaa2c20a13a5bf17558d4dc91a8d078c4fa1872f25303dddcbba3a813085e"}, + {file = "coverage-6.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f02cbbf8119db68455b9d763f2f8737bb7db7e43720afa07d8eb1604e5c5ae28"}, + {file = "coverage-6.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ffa9297c3a453fba4717d06df579af42ab9a28022444cae7fa605af4df612d54"}, + {file = "coverage-6.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:145f296d00441ca703a659e8f3eb48ae39fb083baba2d7ce4482fb2723e050d9"}, + {file = "coverage-6.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d44996140af8b84284e5e7d398e589574b376fb4de8ccd28d82ad8e3bea13"}, + {file = "coverage-6.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2bd9a6fc18aab8d2e18f89b7ff91c0f34ff4d5e0ba0b33e989b3cd4194c81fd9"}, + {file = "coverage-6.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3384f2a3652cef289e38100f2d037956194a837221edd520a7ee5b42d00cc605"}, + {file = "coverage-6.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9b3e07152b4563722be523e8cd0b209e0d1a373022cfbde395ebb6575bf6790d"}, + {file = "coverage-6.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1480ff858b4113db2718848d7b2d1b75bc79895a9c22e76a221b9d8d62496428"}, + {file = "coverage-6.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:865d69ae811a392f4d06bde506d531f6a28a00af36f5c8649684a9e5e4a85c83"}, + {file = "coverage-6.4.1-cp38-cp38-win32.whl", hash = "sha256:664a47ce62fe4bef9e2d2c430306e1428ecea207ffd68649e3b942fa8ea83b0b"}, + {file = "coverage-6.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:26dff09fb0d82693ba9e6231248641d60ba606150d02ed45110f9ec26404ed1c"}, + {file = "coverage-6.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d9c80df769f5ec05ad21ea34be7458d1dc51ff1fb4b2219e77fe24edf462d6df"}, + {file = "coverage-6.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:39ee53946bf009788108b4dd2894bf1349b4e0ca18c2016ffa7d26ce46b8f10d"}, + {file = "coverage-6.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5b66caa62922531059bc5ac04f836860412f7f88d38a476eda0a6f11d4724f4"}, + {file = "coverage-6.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:fd180ed867e289964404051a958f7cccabdeed423f91a899829264bb7974d3d3"}, + {file = "coverage-6.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84631e81dd053e8a0d4967cedab6db94345f1c36107c71698f746cb2636c63e3"}, + {file = "coverage-6.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8c08da0bd238f2970230c2a0d28ff0e99961598cb2e810245d7fc5afcf1254e8"}, + {file = "coverage-6.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d42c549a8f41dc103a8004b9f0c433e2086add8a719da00e246e17cbe4056f72"}, + {file = "coverage-6.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:309ce4a522ed5fca432af4ebe0f32b21d6d7ccbb0f5fcc99290e71feba67c264"}, + {file = "coverage-6.4.1-cp39-cp39-win32.whl", hash = "sha256:fdb6f7bd51c2d1714cea40718f6149ad9be6a2ee7d93b19e9f00934c0f2a74d9"}, + {file = "coverage-6.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:342d4aefd1c3e7f620a13f4fe563154d808b69cccef415415aece4c786665397"}, + {file = "coverage-6.4.1-pp36.pp37.pp38-none-any.whl", hash = "sha256:4803e7ccf93230accb928f3a68f00ffa80a88213af98ed338a57ad021ef06815"}, + {file = "coverage-6.4.1.tar.gz", hash = "sha256:4321f075095a096e70aff1d002030ee612b65a205a0a0f5b815280d5dc58100c"}, ] deprecated = [ {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, @@ -1226,115 +1222,78 @@ dnspython = [ {file = "dnspython-1.16.0-py2.py3-none-any.whl", hash = "sha256:f69c21288a962f4da86e56c4905b49d11aba7938d3d740e80d9e366ee4f1632d"}, {file = "dnspython-1.16.0.zip", hash = "sha256:36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01"}, ] -flower = [ - {file = "flower-1.0.0-py2.py3-none-any.whl", hash = "sha256:a4fcf959881135303e98a74cc7533298b7dfeb48abcd1d90c5bd52cb789430a8"}, - {file = "flower-1.0.0.tar.gz", hash = "sha256:2e17c4fb55c569508f3bfee7fe41f44b8362d30dbdf77b604a9d9f4740fe8cbd"}, -] ghp-import = [ - {file = "ghp-import-2.0.2.tar.gz", hash = "sha256:947b3771f11be850c852c64b561c600fdddf794bab363060854c1ee7ad05e071"}, - {file = "ghp_import-2.0.2-py3-none-any.whl", hash = "sha256:5f8962b30b20652cdffa9c5a9812f7de6bcb56ec475acac579807719bf242c46"}, -] -humanize = [ - {file = "humanize-3.13.1-py3-none-any.whl", hash = "sha256:a6f7cc1597db69a4e571ad5e19b4da07ee871da5a9de2b233dbfab02d98e9754"}, - {file = "humanize-3.13.1.tar.gz", hash = "sha256:12f113f2e369dac7f35d3823f49262934f4a22a53a6d3d4c86b736f50db88c7b"}, + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, ] idna = [ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, - {file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, + {file = "importlib_metadata-4.11.4-py3-none-any.whl", hash = "sha256:c58c8eb8a762858f49e18436ff552e83914778e50e9d2f1660535ffb364552ec"}, + {file = "importlib_metadata-4.11.4.tar.gz", hash = "sha256:5d26852efe48c0a32b0509ffbc583fda1a2266545a78d104a6f4aff3db17d700"}, ] iniconfig = [ {file = 
"iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] jinja2 = [ - {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, - {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] json-log-formatter = [ {file = "JSON-log-formatter-0.5.1.tar.gz", hash = "sha256:53246bcc5be5452bd46020326c50556ac6d35be20f00733d7235e0ca90d0c5f1"}, ] kombu = [ - {file = "kombu-5.2.3-py3-none-any.whl", hash = "sha256:eeaeb8024f3a5cfc71c9250e45cddb8493f269d74ada2f74909a93c59c4b4179"}, - {file = "kombu-5.2.3.tar.gz", hash = "sha256:81a90c1de97e08d3db37dbf163eaaf667445e1068c98bfd89f051a40e9f6dbbd"}, + {file = "kombu-5.2.4-py3-none-any.whl", hash = "sha256:8b213b24293d3417bcf0d2f5537b7f756079e3ea232a8386dcc89a59fd2361a4"}, + {file = "kombu-5.2.4.tar.gz", hash = "sha256:37cee3ee725f94ea8bb173eaab7c1760203ea53bbebae226328600f9d2799610"}, ] markdown = [ - {file = "Markdown-3.3.6-py3-none-any.whl", hash = "sha256:9923332318f843411e9932237530df53162e29dc7a4e2b91e35764583c46c9a3"}, - {file = "Markdown-3.3.6.tar.gz", hash = "sha256:76df8ae32294ec39dcf89340382882dfa12975f87f45c3ed1ecdb1e8cefc7006"}, + {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, + {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, ] markupsafe = [ - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, - {file = 
"MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, - {file = 
"MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"}, - {file = 
"MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, - {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = 
"sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"}, + {file = 
"MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"}, + {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"}, ] mergedeep = [ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, @@ -1345,17 +1304,21 @@ mike = [ {file = "mike-1.1.2.tar.gz", hash = "sha256:56c3f1794c2d0b5fdccfa9b9487beb013ca813de2e3ad0744724e9d34d40b77b"}, ] mkdocs = [ - {file = "mkdocs-1.2.3-py3-none-any.whl", hash = "sha256:a1fa8c2d0c1305d7fc2b9d9f607c71778572a8b110fb26642aa00296c9e6d072"}, - {file = "mkdocs-1.2.3.tar.gz", hash = "sha256:89f5a094764381cda656af4298727c9f53dc3e602983087e1fe96ea1df24f4c1"}, + {file = "mkdocs-1.3.0-py3-none-any.whl", hash = "sha256:26bd2b03d739ac57a3e6eed0b7bcc86168703b719c27b99ad6ca91dc439aacde"}, + {file = "mkdocs-1.3.0.tar.gz", hash = "sha256:b504405b04da38795fec9b2e5e28f6aa3a73bb0960cb6d5d27ead28952bd35ea"}, ] mkdocs-material = [ - {file = "mkdocs-material-8.2.6.tar.gz", hash = "sha256:be76ba3e0c0d4482159fc2c00d060dbf22cfb29f25276ebd0db9a0eaf6a18712"}, - {file = "mkdocs_material-8.2.6-py2.py3-none-any.whl", hash = "sha256:b30b4cfe5b0a74cccf2c75b7127c22cd8d816e6260c9a8708c3baf08c59e6714"}, + {file = "mkdocs-material-8.3.7.tar.gz", hash = "sha256:e0e01f5deeacb126ad0a64998bb66d512c24467f4c9550a0afc74f7f0719a9ae"}, + {file = "mkdocs_material-8.3.7-py2.py3-none-any.whl", hash = "sha256:f24ed0fc185dd88b036abc0425ce9e31ecf1e7e673bf01d3b168b373c08e629a"}, ] mkdocs-material-extensions = [ {file = "mkdocs-material-extensions-1.0.3.tar.gz", hash = "sha256:bfd24dfdef7b41c312ede42648f9eb83476ea168ec163b613f9abd12bbfddba2"}, {file = "mkdocs_material_extensions-1.0.3-py3-none-any.whl", hash = "sha256:a82b70e533ce060b2a5d9eb2bc2e1be201cf61f901f93704b4acf6e3d5983a44"}, ] +mkdocs-video = [ + {file = "mkdocs-video-1.3.0.tar.gz", hash = "sha256:900a7da60aff6d313d3aec47348bb7c87ec2ad39bad27a1146fa153fafd61e44"}, + {file = "mkdocs_video-1.3.0-py3-none-any.whl", hash = "sha256:6512887d65e65f4fb643b64e3f4cc7e365ce27d0ff4c4b48de8d91b04ea89731"}, +] mongoengine = [ {file = "mongoengine-0.24.1-py3-none-any.whl", hash = "sha256:68878b65bcb3751debcba4342180a180161cdb5f46525027e622ad081dd44fac"}, {file = "mongoengine-0.24.1.tar.gz", hash = "sha256:01baac85f408f5eefb6195c0afeae631e7fc6fab5cb221a7b46646f94227d6da"}, 
@@ -1368,8 +1331,8 @@ opentelemetry-api = [ {file = "opentelemetry_api-1.6.2-py3-none-any.whl", hash = "sha256:223ba94033cc03cf3dc9be20304e90300402a8e9b60979ae7f4b76c4b1f76b0a"}, ] opentelemetry-exporter-jaeger-thrift = [ - {file = "opentelemetry-exporter-jaeger-thrift-1.10.0.tar.gz", hash = "sha256:cfd1682592fc33d805622ab2880e71973a0925ca957852a7a9439ba74e40b2c4"}, - {file = "opentelemetry_exporter_jaeger_thrift-1.10.0-py3-none-any.whl", hash = "sha256:e337cf4361f35d17a7aeb600cd4ad37ac024b0e0edb52ee6d171771663d13bc8"}, + {file = "opentelemetry-exporter-jaeger-thrift-1.11.0.tar.gz", hash = "sha256:66924fda1f3b38144b6b87f2c008d8ee064964e45ba76bc4b80adc1b2a115191"}, + {file = "opentelemetry_exporter_jaeger_thrift-1.11.0-py3-none-any.whl", hash = "sha256:5774c31fddf73dd2c0858e3dfd6e9b6245d8975c525267043c717022bf4eb140"}, ] opentelemetry-instrumentation = [ {file = "opentelemetry-instrumentation-0.25b2.tar.gz", hash = "sha256:473426972781b544047acf0ffee757cdbe2254842e6dc44a8d56c027a0e5aaa1"}, @@ -1396,8 +1359,8 @@ packaging = [ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] pika = [ - {file = "pika-1.2.0-py2.py3-none-any.whl", hash = "sha256:59da6701da1aeaf7e5e93bb521cc03129867f6e54b7dd352c4b3ecb2bd7ec624"}, - {file = "pika-1.2.0.tar.gz", hash = "sha256:f023d6ac581086b124190cb3dc81dd581a149d216fa4540ac34f9be1e3970b89"}, + {file = "pika-1.2.1-py2.py3-none-any.whl", hash = "sha256:fe89e95fb2d8d06fd713eeae2938299941e0ec329db37afca758f5f9458ce169"}, + {file = "pika-1.2.1.tar.gz", hash = "sha256:e5fbf3a0a3599f4e114f6e4a7af096f9413a8f24f975c2657ba2fac3c931434f"}, ] pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, @@ -1407,94 +1370,87 @@ ply = [ {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, ] -prometheus-client = [ - {file = "prometheus_client-0.12.0-py2.py3-none-any.whl", hash = "sha256:317453ebabff0a1b02df7f708efbab21e3489e7072b61cb6957230dd004a0af0"}, - {file = "prometheus_client-0.12.0.tar.gz", hash = "sha256:1b12ba48cee33b9b0b9de64a1047cbd3c5f2d0ab6ebcead7ddda613a750ec3c5"}, -] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.24-py3-none-any.whl", hash = "sha256:e56f2ff799bacecd3e88165b1e2f5ebf9bcd59e80e06d395fa0cc4b8bd7bb506"}, - {file = "prompt_toolkit-3.0.24.tar.gz", hash = "sha256:1bb05628c7d87b645974a1bad3f17612be0c29fa39af9f7688030163f680bad6"}, + {file = "prompt_toolkit-3.0.29-py3-none-any.whl", hash = "sha256:62291dad495e665fca0bda814e342c69952086afb0f4094d0893d357e5c78752"}, + {file = "prompt_toolkit-3.0.29.tar.gz", hash = "sha256:bd640f60e8cecd74f0dc249713d433ace2ddc62b65ee07f96d358e0b152b6ea7"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] pycryptodomex = [ - {file = "pycryptodomex-3.13.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:5a2014598ceb19c34f14815a26536e5cc24167ea4d402f0aec2a52b18960c668"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:6b3c06e6d235f475395a7e150f2e562a3e9d749fb40c6d81240596f73809346c"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-manylinux1_x86_64.whl", hash = 
"sha256:c87f62de9e167031ad4179efb1fda4012bb6f7363472a61254e4426bda6bcb64"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:f3a29bb51e5f9b46004b5be16bcbe4e1b2d2754cbe201e1a0b142c307bdf4c73"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:b11331510cfd08ec4416f37dc8f072541d7b7240ba924c71288f7218aad36bdf"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:eb4eea028a7ad28458abf8b98ae14af2fd9baeb327a0adb6af05a488e4d9e9a1"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-win32.whl", hash = "sha256:68fb861b41a889c2efdf2795b0d46aa05d4748543bc4e0bca5886c929c7cbdef"}, - {file = "pycryptodomex-3.13.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e1900d7f16a03b869be3572e7664757c14316329a4d79ecee5a0083fad8c81b0"}, - {file = "pycryptodomex-3.13.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:182962b3612c0d12748fa770f1ef0556ba8ba2c442834450e08acb31d9e6d2ed"}, - {file = "pycryptodomex-3.13.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:9fa76261100b450e5aca2990ba982e5294ba383f653da041a71b4ac1cbaed1ff"}, - {file = "pycryptodomex-3.13.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:2f2bcee2ef59597bfcb755eef2c98294094c1c9b64e9b9195cc9e71be83adb92"}, - {file = "pycryptodomex-3.13.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:b975ce778ea2c65f399ab889a661e118bb68b85db47d93e0442eb1ba1f554794"}, - {file = "pycryptodomex-3.13.0-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:04a38a7dc484f5e3152a69e4eab89d9340c2ad3b7c4a27d2ee256e5fb878c469"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:7fb9d1ab6a10cfc8c8c7e11f004e01c8a1beff5fd4118370d95110735cc23117"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-manylinux1_i686.whl", hash = "sha256:b4240991748ae0f57a0120b8d905b2d9f835fee02968fc11faec929ef6915ee6"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:ccd301d2e71d243b0fad8c4642116c538d7d405d35b6026cf4dcee463a667a2e"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-manylinux2010_i686.whl", hash = "sha256:6d50723984ba802904618ef5bfe257a0f9644e76821d323f79f27be5adb9ece7"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:7fb188c9a0f69d4f7b607780641ef7aec7f02a8dad689512b17bdf04c96ce6e3"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:dfb8bcd45e504e1c26f0bfc404f3edd08f8c8057dfe04fbf6159adc8694ff97a"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-win32.whl", hash = "sha256:00e37d478c0f040639ab41a9d5280291ad2b3b5f25b9aad5baa1d5ecb578a3f6"}, - {file = "pycryptodomex-3.13.0-cp35-abi3-win_amd64.whl", hash = "sha256:e2ddfbcb2c4c7cb8f79db49e284280be468699c701b92d30fd1e46a786b39f5b"}, - {file = "pycryptodomex-3.13.0-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:2f7db8d85294c1123e700097af407425fd4c9e6c58b688f391de7053c6a60317"}, - {file = "pycryptodomex-3.13.0-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:3b7656189c259bb2b838559f0a11b533d4d18409ab6d9119c00bae436c3d3e34"}, - {file = "pycryptodomex-3.13.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:f553abcb3572242fed87e308a6b91a9bc5a74b801b5d093969391b0500be718b"}, - {file = "pycryptodomex-3.13.0-pp27-pypy_73-win32.whl", hash = "sha256:b7b059517d84c57f25c6fd3b2e03a1b2945df2e585b96109bcd11e56f6c9e610"}, - {file = "pycryptodomex-3.13.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dce2bfd0f285c3fcff89e4239c55f5fbe664ff435ee45abfc154aac0f222ab14"}, - {file = 
"pycryptodomex-3.13.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:0ec86fca2114e8c58fe6bfc7e04ee91568a813139dcf4334819aa44876764bcf"}, - {file = "pycryptodomex-3.13.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:80eedc23c4c4d3655c6a7d315a01f0e9d460c7070c5c3af4952937b4f2c0da6f"}, - {file = "pycryptodomex-3.13.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:05e0e3b78b7ccc0b7c5f88596d51fdc8533adb91070b93e18cec12ca3b43deb3"}, - {file = "pycryptodomex-3.13.0.tar.gz", hash = "sha256:63443230247837dd03c5d4028cae5cb2e6793a9ae110e321798bee48a04ff3e9"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca88f2f7020002638276439a01ffbb0355634907d1aa5ca91f3dc0c2e44e8f3b"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:8536bc08d130cae6dcba1ea689f2913dfd332d06113904d171f2f56da6228e89"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:406ec8cfe0c098fadb18d597dc2ee6de4428d640c0ccafa453f3d9b2e58d29e2"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:da8db8374295fb532b4b0c467e66800ef17d100e4d5faa2bbbd6df35502da125"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:d709572d64825d8d59ea112e11cc7faf6007f294e9951324b7574af4251e4de8"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-win32.whl", hash = "sha256:3da13c2535b7aea94cc2a6d1b1b37746814c74b6e80790daddd55ca5c120a489"}, + {file = "pycryptodomex-3.14.1-cp27-cp27m-win_amd64.whl", hash = "sha256:298c00ea41a81a491d5b244d295d18369e5aac4b61b77b2de5b249ca61cd6659"}, + {file = "pycryptodomex-3.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:77931df40bb5ce5e13f4de2bfc982b2ddc0198971fbd947776c8bb5050896eb2"}, + {file = "pycryptodomex-3.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:c5dd3ffa663c982d7f1be9eb494a8924f6d40e2e2f7d1d27384cfab1b2ac0662"}, + {file = "pycryptodomex-3.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:2aa887683eee493e015545bd69d3d21ac8d5ad582674ec98f4af84511e353e45"}, + {file = "pycryptodomex-3.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:8085bd0ad2034352eee4d4f3e2da985c2749cb7344b939f4d95ead38c2520859"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:e95a4a6c54d27a84a4624d2af8bb9ee178111604653194ca6880c98dcad92f48"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-manylinux1_i686.whl", hash = "sha256:a4d412eba5679ede84b41dbe48b1bed8f33131ab9db06c238a235334733acc5e"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:d2cce1c82a7845d7e2e8a0956c6b7ed3f1661c9acf18eb120fc71e098ab5c6fe"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-manylinux2010_i686.whl", hash = "sha256:f75009715dcf4a3d680c2338ab19dac5498f8121173a929872950f4fb3a48fbf"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:1ca8e1b4c62038bb2da55451385246f51f412c5f5eabd64812c01766a5989b4a"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-win32.whl", hash = "sha256:ee835def05622e0c8b1435a906491760a43d0c462f065ec9143ec4b8d79f8bff"}, + {file = "pycryptodomex-3.14.1-cp35-abi3-win_amd64.whl", hash = "sha256:b5a185ae79f899b01ca49f365bdf15a45d78d9856f09b0de1a41b92afce1a07f"}, + {file = "pycryptodomex-3.14.1-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:797a36bd1f69df9e2798e33edb4bd04e5a30478efc08f9428c087f17f65a7045"}, + {file = "pycryptodomex-3.14.1-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:aebecde2adc4a6847094d3bd6a8a9538ef3438a5ea84ac1983fcb167db614461"}, + {file = 
"pycryptodomex-3.14.1-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:f8524b8bc89470cec7ac51734907818d3620fb1637f8f8b542d650ebec42a126"}, + {file = "pycryptodomex-3.14.1-pp27-pypy_73-win32.whl", hash = "sha256:4d0db8df9ffae36f416897ad184608d9d7a8c2b46c4612c6bc759b26c073f750"}, + {file = "pycryptodomex-3.14.1-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b276cc4deb4a80f9dfd47a41ebb464b1fe91efd8b1b8620cf5ccf8b824b850d6"}, + {file = "pycryptodomex-3.14.1-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:e36c7e3b5382cd5669cf199c4a04a0279a43b2a3bdd77627e9b89778ac9ec08c"}, + {file = "pycryptodomex-3.14.1-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:c4d8977ccda886d88dc3ca789de2f1adc714df912ff3934b3d0a3f3d777deafb"}, + {file = "pycryptodomex-3.14.1-pp36-pypy36_pp73-win32.whl", hash = "sha256:530756d2faa40af4c1f74123e1d889bd07feae45bac2fd32f259a35f7aa74151"}, + {file = "pycryptodomex-3.14.1.tar.gz", hash = "sha256:2ce76ed0081fd6ac8c74edc75b9d14eca2064173af79843c24fa62573263c1f2"}, ] pydantic = [ - {file = "pydantic-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb23bcc093697cdea2708baae4f9ba0e972960a835af22560f6ae4e7e47d33f5"}, - {file = "pydantic-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1d5278bd9f0eee04a44c712982343103bba63507480bfd2fc2790fa70cd64cf4"}, - {file = "pydantic-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab624700dc145aa809e6f3ec93fb8e7d0f99d9023b713f6a953637429b437d37"}, - {file = "pydantic-1.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8d7da6f1c1049eefb718d43d99ad73100c958a5367d30b9321b092771e96c25"}, - {file = "pydantic-1.9.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3c3b035103bd4e2e4a28da9da7ef2fa47b00ee4a9cf4f1a735214c1bcd05e0f6"}, - {file = "pydantic-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3011b975c973819883842c5ab925a4e4298dffccf7782c55ec3580ed17dc464c"}, - {file = "pydantic-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:086254884d10d3ba16da0588604ffdc5aab3f7f09557b998373e885c690dd398"}, - {file = "pydantic-1.9.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0fe476769acaa7fcddd17cadd172b156b53546ec3614a4d880e5d29ea5fbce65"}, - {file = "pydantic-1.9.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8e9dcf1ac499679aceedac7e7ca6d8641f0193c591a2d090282aaf8e9445a46"}, - {file = "pydantic-1.9.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1e4c28f30e767fd07f2ddc6f74f41f034d1dd6bc526cd59e63a82fe8bb9ef4c"}, - {file = "pydantic-1.9.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:c86229333cabaaa8c51cf971496f10318c4734cf7b641f08af0a6fbf17ca3054"}, - {file = "pydantic-1.9.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c0727bda6e38144d464daec31dff936a82917f431d9c39c39c60a26567eae3ed"}, - {file = "pydantic-1.9.0-cp36-cp36m-win_amd64.whl", hash = "sha256:dee5ef83a76ac31ab0c78c10bd7d5437bfdb6358c95b91f1ba7ff7b76f9996a1"}, - {file = "pydantic-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9c9bdb3af48e242838f9f6e6127de9be7063aad17b32215ccc36a09c5cf1070"}, - {file = "pydantic-1.9.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ee7e3209db1e468341ef41fe263eb655f67f5c5a76c924044314e139a1103a2"}, - {file = "pydantic-1.9.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0b6037175234850ffd094ca77bf60fb54b08b5b22bc85865331dd3bda7a02fa1"}, - {file = "pydantic-1.9.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b2571db88c636d862b35090ccf92bf24004393f85c8870a37f42d9f23d13e032"}, - {file = "pydantic-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8b5ac0f1c83d31b324e57a273da59197c83d1bb18171e512908fe5dc7278a1d6"}, - {file = "pydantic-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bbbc94d0c94dd80b3340fc4f04fd4d701f4b038ebad72c39693c794fd3bc2d9d"}, - {file = "pydantic-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e0896200b6a40197405af18828da49f067c2fa1f821491bc8f5bde241ef3f7d7"}, - {file = "pydantic-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bdfdadb5994b44bd5579cfa7c9b0e1b0e540c952d56f627eb227851cda9db77"}, - {file = "pydantic-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:574936363cd4b9eed8acdd6b80d0143162f2eb654d96cb3a8ee91d3e64bf4cf9"}, - {file = "pydantic-1.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c556695b699f648c58373b542534308922c46a1cda06ea47bc9ca45ef5b39ae6"}, - {file = "pydantic-1.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f947352c3434e8b937e3aa8f96f47bdfe6d92779e44bb3f41e4c213ba6a32145"}, - {file = "pydantic-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5e48ef4a8b8c066c4a31409d91d7ca372a774d0212da2787c0d32f8045b1e034"}, - {file = "pydantic-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:96f240bce182ca7fe045c76bcebfa0b0534a1bf402ed05914a6f1dadff91877f"}, - {file = "pydantic-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:815ddebb2792efd4bba5488bc8fde09c29e8ca3227d27cf1c6990fc830fd292b"}, - {file = "pydantic-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c5b77947b9e85a54848343928b597b4f74fc364b70926b3c4441ff52620640c"}, - {file = "pydantic-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c68c3bc88dbda2a6805e9a142ce84782d3930f8fdd9655430d8576315ad97ce"}, - {file = "pydantic-1.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a79330f8571faf71bf93667d3ee054609816f10a259a109a0738dac983b23c3"}, - {file = "pydantic-1.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f5a64b64ddf4c99fe201ac2724daada8595ada0d102ab96d019c1555c2d6441d"}, - {file = "pydantic-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a733965f1a2b4090a5238d40d983dcd78f3ecea221c7af1497b845a9709c1721"}, - {file = "pydantic-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cc6a4cb8a118ffec2ca5fcb47afbacb4f16d0ab8b7350ddea5e8ef7bcc53a16"}, - {file = "pydantic-1.9.0-py3-none-any.whl", hash = "sha256:085ca1de245782e9b46cefcf99deecc67d418737a1fd3f6a4f511344b613a5b3"}, - {file = "pydantic-1.9.0.tar.gz", hash = "sha256:742645059757a56ecd886faf4ed2441b9c0cd406079c2b4bee51bcc3fbcd510a"}, + {file = "pydantic-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8098a724c2784bf03e8070993f6d46aa2eeca031f8d8a048dff277703e6e193"}, + {file = "pydantic-1.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c320c64dd876e45254bdd350f0179da737463eea41c43bacbee9d8c9d1021f11"}, + {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18f3e912f9ad1bdec27fb06b8198a2ccc32f201e24174cec1b3424dda605a310"}, + {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c11951b404e08b01b151222a1cb1a9f0a860a8153ce8334149ab9199cd198131"}, + {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8bc541a405423ce0e51c19f637050acdbdf8feca34150e0d17f675e72d119580"}, + {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e565a785233c2d03724c4dc55464559639b1ba9ecf091288dd47ad9c629433bd"}, + {file = "pydantic-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4a88dcd6ff8fd47c18b3a3709a89adb39a6373f4482e04c1b765045c7e282fd"}, + {file = "pydantic-1.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:447d5521575f18e18240906beadc58551e97ec98142266e521c34968c76c8761"}, + {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985ceb5d0a86fcaa61e45781e567a59baa0da292d5ed2e490d612d0de5796918"}, + {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059b6c1795170809103a1538255883e1983e5b831faea6558ef873d4955b4a74"}, + {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d12f96b5b64bec3f43c8e82b4aab7599d0157f11c798c9f9c528a72b9e0b339a"}, + {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ae72f8098acb368d877b210ebe02ba12585e77bd0db78ac04a1ee9b9f5dd2166"}, + {file = "pydantic-1.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:79b485767c13788ee314669008d01f9ef3bc05db9ea3298f6a50d3ef596a154b"}, + {file = "pydantic-1.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:494f7c8537f0c02b740c229af4cb47c0d39840b829ecdcfc93d91dcbb0779892"}, + {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0f047e11febe5c3198ed346b507e1d010330d56ad615a7e0a89fae604065a0e"}, + {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:969dd06110cb780da01336b281f53e2e7eb3a482831df441fb65dd30403f4608"}, + {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:177071dfc0df6248fd22b43036f936cfe2508077a72af0933d0c1fa269b18537"}, + {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9bcf8b6e011be08fb729d110f3e22e654a50f8a826b0575c7196616780683380"}, + {file = "pydantic-1.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a955260d47f03df08acf45689bd163ed9df82c0e0124beb4251b1290fa7ae728"}, + {file = "pydantic-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ce157d979f742a915b75f792dbd6aa63b8eccaf46a1005ba03aa8a986bde34a"}, + {file = "pydantic-1.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0bf07cab5b279859c253d26a9194a8906e6f4a210063b84b433cf90a569de0c1"}, + {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d93d4e95eacd313d2c765ebe40d49ca9dd2ed90e5b37d0d421c597af830c195"}, + {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1542636a39c4892c4f4fa6270696902acb186a9aaeac6f6cf92ce6ae2e88564b"}, + {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a9af62e9b5b9bc67b2a195ebc2c2662fdf498a822d62f902bf27cccb52dbbf49"}, + {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fe4670cb32ea98ffbf5a1262f14c3e102cccd92b1869df3bb09538158ba90fe6"}, + {file = "pydantic-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9f659a5ee95c8baa2436d392267988fd0f43eb774e5eb8739252e5a7e9cf07e0"}, + {file = "pydantic-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:b83ba3825bc91dfa989d4eed76865e71aea3a6ca1388b59fc801ee04c4d8d0d6"}, + {file = "pydantic-1.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1dd8fecbad028cd89d04a46688d2fcc14423e8a196d5b0a5c65105664901f810"}, + {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f"}, + {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb57ba90929bac0b6cc2af2373893d80ac559adda6933e562dcfb375029acee"}, + {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4ce9ae9e91f46c344bec3b03d6ee9612802682c1551aaf627ad24045ce090761"}, + {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:72ccb318bf0c9ab97fc04c10c37683d9eea952ed526707fabf9ac5ae59b701fd"}, + {file = "pydantic-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b6760b08b7c395975d893e0b814a11cf011ebb24f7d869e7118f5a339a82e1"}, + {file = "pydantic-1.9.1-py3-none-any.whl", hash = "sha256:4988c0f13c42bfa9ddd2fe2f569c9d54646ce84adc5de84228cfe83396f3bd58"}, + {file = "pydantic-1.9.1.tar.gz", hash = "sha256:1ed987c3ff29fff7fd8c3ea3a3ea877ad310aae2ef9889a119e22d3f2db0691a"}, ] pygments = [ - {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, - {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, + {file = "Pygments-2.12.0-py3-none-any.whl", hash = "sha256:dc9c10fb40944260f6ed4c688ece0cd2048414940f1cea51b8b226318411c519"}, + {file = "Pygments-2.12.0.tar.gz", hash = "sha256:5eb116118f9612ff1ee89ac96437bb6b49e8f04d8a13b514ba26f620208e26eb"}, ] pymdown-extensions = [ - {file = "pymdown-extensions-9.1.tar.gz", hash = "sha256:74247f2c80f1d9e3c7242abe1c16317da36c6f26c7ad4b8a7f457f0ec20f0365"}, - {file = "pymdown_extensions-9.1-py3-none-any.whl", hash = "sha256:b03e66f91f33af4a6e7a0e20c740313522995f69a03d86316b1449766c473d0e"}, + {file = "pymdown_extensions-9.5-py3-none-any.whl", hash = "sha256:ec141c0f4983755349f0c8710416348d1a13753976c028186ed14f190c8061c4"}, + {file = "pymdown_extensions-9.5.tar.gz", hash = "sha256:3ef2d998c0d5fa7eb09291926d90d69391283561cf6306f85cd588a5eb5befa0"}, ] pymongo = [ {file = "pymongo-3.12.3-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:c164eda0be9048f83c24b9b2656900041e069ddf72de81c17d874d0c32f6079f"}, @@ -1606,8 +1562,8 @@ pymongo = [ {file = "pymongo-3.12.3.tar.gz", hash = "sha256:0a89cadc0062a5e53664dde043f6c097172b8c1c5f0094490095282ff9995a5f"}, ] pyparsing = [ - {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"}, - {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, ] pyrate-limiter = [ {file = "pyrate-limiter-2.8.1.tar.gz", hash = "sha256:0741b7db4b3facdce60bd836e0fcc43911bc52c443f674f924afba7e02e79c18"}, @@ -1617,16 +1573,16 @@ pysnmp-pyasn1 = [ {file = "pysnmp_pyasn1-1.1.2-py3-none-any.whl", hash = "sha256:de4132bc2931a7b32277aac1ce8e7650db137199df087a376530153b1b1b4d8d"}, ] pysnmp-pysmi = [ - {file = "pysnmp-pysmi-1.1.8.tar.gz", hash = 
"sha256:23898cc2e4f9736d306192dab28fe8c6f9042c5fb9524ab9837840046f952094"}, - {file = "pysnmp_pysmi-1.1.8-py3-none-any.whl", hash = "sha256:9da5891c16c00cb8c7482c372f73e4fcd6941aaa56c74565df8d4dae87f30117"}, + {file = "pysnmp-pysmi-1.1.10.tar.gz", hash = "sha256:0149c5772e6151f6286f546058da3e1203771d46c9b8b53b568bf1c44267506f"}, + {file = "pysnmp_pysmi-1.1.10-py3-none-any.whl", hash = "sha256:6526b2bda6ca5f01f1c0ac2c8ff01cb34e0eec3c9fe887decd86dc78121ce52c"}, ] pysnmplib = [ - {file = "pysnmplib-5.0.10-py3-none-any.whl", hash = "sha256:8a4f117c7cbdad0fb7b26aaf828952af623151a0893122726c285ffbd5fbbe68"}, - {file = "pysnmplib-5.0.10.tar.gz", hash = "sha256:6d4385ce992ab42c7604e87a0ffb486d41f87a596f6b2b74f6b6403d7172d045"}, + {file = "pysnmplib-5.0.17-py3-none-any.whl", hash = "sha256:2400e2c7776e7653b2edac5a5d35d5aa959bd0dad54d7b06d7b95b89312d5e64"}, + {file = "pysnmplib-5.0.17.tar.gz", hash = "sha256:73fd976f2608597776890c69a1539c9967f5ddd9ca6836cc5b0d1915e7a17ad8"}, ] pytest = [ - {file = "pytest-7.1.1-py3-none-any.whl", hash = "sha256:92f723789a8fdd7180b6b06483874feca4c48a5c76968e03bb3e7f806a1869ea"}, - {file = "pytest-7.1.1.tar.gz", hash = "sha256:841132caef6b1ad17a9afde46dc4f6cfa59a05f9555aae5151f73bdf2820ca63"}, + {file = "pytest-7.1.2-py3-none-any.whl", hash = "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c"}, + {file = "pytest-7.1.2.tar.gz", hash = "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"}, ] pytest-cov = [ {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, @@ -1641,8 +1597,8 @@ python-dotenv = [ {file = "python_dotenv-0.19.2-py2.py3-none-any.whl", hash = "sha256:32b2bdc1873fd3a3c346da1c6db83d0053c3c62f28f1f38516070c4c8971b1d3"}, ] pytz = [ - {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, - {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, + {file = "pytz-2022.1-py2.py3-none-any.whl", hash = "sha256:e68985985296d9a66a881eb3193b0906246245294a881e7c8afe623866ac6a5c"}, + {file = "pytz-2022.1.tar.gz", hash = "sha256:1e760e2fe6a8163bc0b3d9a19c4f84342afa0a2affebfaa84b01b978a02ecaa7"}, ] pyyaml = [ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, @@ -1683,13 +1639,17 @@ pyyaml-env-tag = [ {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, ] +redis = [ + {file = "redis-4.3.3-py3-none-any.whl", hash = "sha256:f57f8df5d238a8ecf92f499b6b21467bfee6c13d89953c27edf1e2bc673622e7"}, + {file = "redis-4.3.3.tar.gz", hash = "sha256:2f7a57cf4af15cd543c4394bcbe2b9148db2606a37edba755368836e3a1d053e"}, +] requests = [ - {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, - {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, + {file = "requests-2.28.0-py3-none-any.whl", hash = "sha256:bc7861137fbce630f17b03d3ad02ad0bf978c844f3536d0edda6499dafce2b6f"}, + {file = "requests-2.28.0.tar.gz", hash = "sha256:d568723a7ebd25875d8d1eaf5dfa068cd2fc8194b2e483d7b1f7c81918dbec6b"}, ] requests-cache = [ - {file = 
"requests-cache-0.9.3.tar.gz", hash = "sha256:b32f8afba2439e1b3e12cba511c8f579271eff827f063210d62f9efa5bed6564"}, - {file = "requests_cache-0.9.3-py3-none-any.whl", hash = "sha256:d8b32405b2725906aa09810f4796e54cc03029de269381b404c426bae927bada"}, + {file = "requests-cache-0.9.4.tar.gz", hash = "sha256:04bb1212bbefba122080163530555f36e64fcd517ef8f90e289ef76f7ae055da"}, + {file = "requests_cache-0.9.4-py3-none-any.whl", hash = "sha256:edc51d4ba3f1bcacf100aa6097d0c4ae4c787fe13dcc4b2d3a6ce3064482b072"}, ] requests-ratelimiter = [ {file = "requests-ratelimiter-0.2.1.tar.gz", hash = "sha256:9c9271c9435eea9e50b87837c32c23dd6f421c8303cbf615e0b52ee5e640015a"}, @@ -1734,67 +1694,28 @@ tblib = [ {file = "tblib-1.7.0-py2.py3-none-any.whl", hash = "sha256:289fa7359e580950e7d9743eab36b0691f0310fce64dee7d9c31065b8f723e23"}, {file = "tblib-1.7.0.tar.gz", hash = "sha256:059bd77306ea7b419d4f76016aef6d7027cc8a0785579b5aad198803435f882c"}, ] +tenacity = [ + {file = "tenacity-8.0.1-py3-none-any.whl", hash = "sha256:f78f4ea81b0fabc06728c11dc2a8c01277bfc5181b321a4770471902e3eb844a"}, + {file = "tenacity-8.0.1.tar.gz", hash = "sha256:43242a20e3e73291a28bcbcacfd6e000b02d3857a9a9fff56b297a27afdc932f"}, +] thrift = [ - {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"}, + {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] tomli = [ - {file = "tomli-2.0.0-py3-none-any.whl", hash = "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224"}, - {file = "tomli-2.0.0.tar.gz", hash = "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"}, -] -tornado = [ - {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"}, - {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"}, - {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"}, - {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"}, - {file = 
"tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"}, - {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"}, - {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"}, - {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"}, - {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"}, - {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"}, - {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"}, - {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"}, - {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"}, - {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"}, - {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"}, - {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"}, - {file = "tornado-6.1-cp39-cp39-win32.whl", hash = 
"sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"}, - {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, - {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] typing-extensions = [ - {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, - {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, + {file = "typing_extensions-4.2.0-py3-none-any.whl", hash = "sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708"}, + {file = "typing_extensions-4.2.0.tar.gz", hash = "sha256:f1c24655a0da0d1b67f07e17a5e6b2a105894e6824b92096378bb3668ef02376"}, ] url-normalize = [ {file = "url-normalize-1.4.3.tar.gz", hash = "sha256:d23d3a070ac52a67b83a1c59a0e68f8608d1cd538783b401bc9de2c0fac999b2"}, {file = "url_normalize-1.4.3-py2.py3-none-any.whl", hash = "sha256:ec3c301f04e5bb676d333a7fa162fa977ad2ca04b7e652bfc9fac4e405728eed"}, ] urllib3 = [ - {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, - {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, + {file = "urllib3-1.26.9-py2.py3-none-any.whl", hash = "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14"}, + {file = "urllib3-1.26.9.tar.gz", hash = "sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e"}, ] verspec = [ {file = "verspec-0.1.0-py3-none-any.whl", hash = "sha256:741877d5633cc9464c45a469ae2a31e801e6dbbaa85b9675d481cda100f11c31"}, @@ -1806,88 +1727,103 @@ vine = [ ] wait-for-dep = [] watchdog = [ - {file = "watchdog-2.1.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9693f35162dc6208d10b10ddf0458cc09ad70c30ba689d9206e02cd836ce28a3"}, - {file = "watchdog-2.1.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aba5c812f8ee8a3ff3be51887ca2d55fb8e268439ed44110d3846e4229eb0e8b"}, - {file = "watchdog-2.1.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4ae38bf8ba6f39d5b83f78661273216e7db5b00f08be7592062cb1fc8b8ba542"}, - {file = "watchdog-2.1.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ad6f1796e37db2223d2a3f302f586f74c72c630b48a9872c1e7ae8e92e0ab669"}, - {file = "watchdog-2.1.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:922a69fa533cb0c793b483becaaa0845f655151e7256ec73630a1b2e9ebcb660"}, - {file = "watchdog-2.1.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b2fcf9402fde2672545b139694284dc3b665fd1be660d73eca6805197ef776a3"}, - {file = "watchdog-2.1.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3386b367e950a11b0568062b70cc026c6f645428a698d33d39e013aaeda4cc04"}, - {file = "watchdog-2.1.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f1c00aa35f504197561060ca4c21d3cc079ba29cf6dd2fe61024c70160c990b"}, - {file = "watchdog-2.1.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b52b88021b9541a60531142b0a451baca08d28b74a723d0c99b13c8c8d48d604"}, - {file = "watchdog-2.1.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:8047da932432aa32c515ec1447ea79ce578d0559362ca3605f8e9568f844e3c6"}, - {file = "watchdog-2.1.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e92c2d33858c8f560671b448205a268096e17870dcf60a9bb3ac7bfbafb7f5f9"}, - {file = "watchdog-2.1.6-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b7d336912853d7b77f9b2c24eeed6a5065d0a0cc0d3b6a5a45ad6d1d05fb8cd8"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_aarch64.whl", hash = "sha256:cca7741c0fcc765568350cb139e92b7f9f3c9a08c4f32591d18ab0a6ac9e71b6"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_armv7l.whl", hash = "sha256:25fb5240b195d17de949588628fdf93032ebf163524ef08933db0ea1f99bd685"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_i686.whl", hash = "sha256:be9be735f827820a06340dff2ddea1fb7234561fa5e6300a62fe7f54d40546a0"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0d19fb2441947b58fbf91336638c2b9f4cc98e05e1045404d7a4cb7cddc7a65"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:3becdb380d8916c873ad512f1701f8a92ce79ec6978ffde92919fd18d41da7fb"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_s390x.whl", hash = "sha256:ae67501c95606072aafa865b6ed47343ac6484472a2f95490ba151f6347acfc2"}, - {file = "watchdog-2.1.6-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e0f30db709c939cabf64a6dc5babb276e6d823fd84464ab916f9b9ba5623ca15"}, - {file = "watchdog-2.1.6-py3-none-win32.whl", hash = "sha256:e02794ac791662a5eafc6ffeaf9bcc149035a0e48eb0a9d40a8feb4622605a3d"}, - {file = "watchdog-2.1.6-py3-none-win_amd64.whl", hash = "sha256:bd9ba4f332cf57b2c1f698be0728c020399ef3040577cde2939f2e045b39c1e5"}, - {file = "watchdog-2.1.6-py3-none-win_ia64.whl", hash = "sha256:a0f1c7edf116a12f7245be06120b1852275f9506a7d90227648b250755a03923"}, - {file = "watchdog-2.1.6.tar.gz", hash = "sha256:a36e75df6c767cbf46f61a91c70b3ba71811dfa0aca4a324d9407a06a8b7a2e7"}, + {file = "watchdog-2.1.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a735a990a1095f75ca4f36ea2ef2752c99e6ee997c46b0de507ba40a09bf7330"}, + {file = "watchdog-2.1.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b17d302850c8d412784d9246cfe8d7e3af6bcd45f958abb2d08a6f8bedf695d"}, + {file = "watchdog-2.1.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee3e38a6cc050a8830089f79cbec8a3878ec2fe5160cdb2dc8ccb6def8552658"}, + {file = "watchdog-2.1.9-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64a27aed691408a6abd83394b38503e8176f69031ca25d64131d8d640a307591"}, + {file = "watchdog-2.1.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:195fc70c6e41237362ba720e9aaf394f8178bfc7fa68207f112d108edef1af33"}, + {file = "watchdog-2.1.9-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bfc4d351e6348d6ec51df007432e6fe80adb53fd41183716017026af03427846"}, + {file = "watchdog-2.1.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8250546a98388cbc00c3ee3cc5cf96799b5a595270dfcfa855491a64b86ef8c3"}, + {file = "watchdog-2.1.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:117ffc6ec261639a0209a3252546b12800670d4bf5f84fbd355957a0595fe654"}, + {file = "watchdog-2.1.9-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:97f9752208f5154e9e7b76acc8c4f5a58801b338de2af14e7e181ee3b28a5d39"}, + {file = "watchdog-2.1.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:247dcf1df956daa24828bfea5a138d0e7a7c98b1a47cf1fa5b0c3c16241fcbb7"}, + {file = "watchdog-2.1.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:226b3c6c468ce72051a4c15a4cc2ef317c32590d82ba0b330403cafd98a62cfd"}, + {file = 
"watchdog-2.1.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d9820fe47c20c13e3c9dd544d3706a2a26c02b2b43c993b62fcd8011bcc0adb3"}, + {file = "watchdog-2.1.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:70af927aa1613ded6a68089a9262a009fbdf819f46d09c1a908d4b36e1ba2b2d"}, + {file = "watchdog-2.1.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed80a1628cee19f5cfc6bb74e173f1b4189eb532e705e2a13e3250312a62e0c9"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9f05a5f7c12452f6a27203f76779ae3f46fa30f1dd833037ea8cbc2887c60213"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_armv7l.whl", hash = "sha256:255bb5758f7e89b1a13c05a5bceccec2219f8995a3a4c4d6968fe1de6a3b2892"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_i686.whl", hash = "sha256:d3dda00aca282b26194bdd0adec21e4c21e916956d972369359ba63ade616153"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_ppc64.whl", hash = "sha256:186f6c55abc5e03872ae14c2f294a153ec7292f807af99f57611acc8caa75306"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:083171652584e1b8829581f965b9b7723ca5f9a2cd7e20271edf264cfd7c1412"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_s390x.whl", hash = "sha256:b530ae007a5f5d50b7fbba96634c7ee21abec70dc3e7f0233339c81943848dc1"}, + {file = "watchdog-2.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:4f4e1c4aa54fb86316a62a87b3378c025e228178d55481d30d857c6c438897d6"}, + {file = "watchdog-2.1.9-py3-none-win32.whl", hash = "sha256:5952135968519e2447a01875a6f5fc8c03190b24d14ee52b0f4b1682259520b1"}, + {file = "watchdog-2.1.9-py3-none-win_amd64.whl", hash = "sha256:7a833211f49143c3d336729b0020ffd1274078e94b0ae42e22f596999f50279c"}, + {file = "watchdog-2.1.9-py3-none-win_ia64.whl", hash = "sha256:ad576a565260d8f99d97f2e64b0f97a48228317095908568a9d5c786c829d428"}, + {file = "watchdog-2.1.9.tar.gz", hash = "sha256:43ce20ebb36a51f21fa376f76d1d4692452b2527ccd601950d69ed36b9e21609"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, ] wrapt = [ - {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"}, - {file = 
"wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"}, - {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"}, - {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"}, - {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"}, - {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"}, - {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"}, - {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"}, - {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"}, - {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"}, - {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"}, - {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"}, - {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"}, - {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"}, - {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"}, - {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"}, - {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"}, - {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"}, - {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"}, + {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, + {file = 
"wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, + {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, + {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, + {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, + {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, + {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, + {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, + {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, + {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, + {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, + {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, + {file = 
"wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, + {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, + {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, + {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, + {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, + {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, ] zipp = [ - {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, - {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, + {file = "zipp-3.8.0-py3-none-any.whl", hash = "sha256:c4f6e5bbf48e74f7a38e7cc5b0480ff42b0ae5178957d564d18932525d5cf099"}, + {file = "zipp-3.8.0.tar.gz", hash = "sha256:56bf8aadb83c24db6c4b577e13de374ccfb67da2078beba1d037c17980bf43ad"}, ] diff --git a/pyproject.toml b/pyproject.toml index dd6297180..b9d4f57c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "splunk-connect-for-snmp" version = "1.6.3" description = "" -authors = ["rfaircloth-splunk "] +authors = ["omrozowicz-splunk "] license = "Apache-2.0" include = ["splunk_connect_for_snmp/profiles/*.yaml"] @@ -35,17 +35,16 @@ pyrate-limiter = "^2.8.1" requests-cache = "^0.9.3" requests-ratelimiter = "^0.2.1" mongoengine = "^0.24.1" -#celerybeat-mongo = "^0.2.0" -celerybeat-mongo = {git="https://github.com/splunk/celerybeat-mongo", branch="main"} +celery-redbeat = {git = "https://github.com/splunk/redbeat", rev = "main"} pysnmplib = "^5.0.5" PyYAML = "^6.0" #Note this is temporary PR to upstream project is issued -wait-for-dep = { extras = ["rabbitmq"], git="https://github.com/rfaircloth-splunk/wait-for-dep.git" } -flower = "^1.0.0" +wait-for-dep = {extras = ["redis"], git="https://github.com/omrozowicz-splunk/wait-for-dep.git"} mongolock = "^1.3.4" 
pika = "^1.2.0" JSON-log-formatter ="^0.5.1" "ruamel.yaml" = "^0.17.21" +mkdocs-video = "^1.3.0" [tool.poetry.dev-dependencies] pytest = "^7.1.1" diff --git a/splunk_connect_for_snmp/.gitignore b/splunk_connect_for_snmp/.gitignore new file mode 100644 index 000000000..20b0bc553 --- /dev/null +++ b/splunk_connect_for_snmp/.gitignore @@ -0,0 +1,3 @@ +.env +config.yaml +inventory.csv \ No newline at end of file diff --git a/splunk_connect_for_snmp/celery_config.py b/splunk_connect_for_snmp/celery_config.py index 13b51ec62..18d963941 100644 --- a/splunk_connect_for_snmp/celery_config.py +++ b/splunk_connect_for_snmp/celery_config.py @@ -16,6 +16,8 @@ # Support use of .env file for developers from contextlib import suppress +from kombu import Queue + with suppress(ImportError): from dotenv import load_dotenv @@ -24,29 +26,35 @@ import os -MONGO_DB = os.getenv("MONGO_DB", "sc4snmp") -MONGO_DB_SCHEDULES = os.getenv("MONGO_DB_SCHEDULES", "schedules") CELERY_TASK_TIMEOUT = int(os.getenv("CELERY_TASK_TIMEOUT", "2400")) -MONGO_URI = os.getenv("MONGO_URI") -MONGO_DB_CELERY_DATABASE = os.getenv("MONGO_DB_CELERY_DATABASE", MONGO_DB) - +PREFETCH_COUNT = int(os.getenv("PREFETCH_COUNT", 1)) +redbeat_redis_url = os.getenv("REDIS_URL") # broker -broker_url = os.getenv("CELERY_BROKER_URL") -# results config -result_backend = MONGO_URI +broker_url = os.getenv("CELERY_BROKER_URL", "amqp://guest:guest@localhost:5672//") result_extended = True -mongodb_backend_settings = {"database": MONGO_DB_CELERY_DATABASE} - -beat_scheduler = "celerybeatmongo.schedulers.MongoScheduler" -mongodb_scheduler_url = MONGO_URI -mongodb_scheduler_db = MONGO_DB_CELERY_DATABASE +beat_scheduler = "redbeat.RedBeatScheduler" +redbeat_lock_key = None # Optimization for long running tasks # https://docs.celeryproject.org/en/stable/userguide/optimizing.html#reserve-one-task-at-a-time task_acks_late = True -worker_prefetch_multiplier = 1 +worker_prefetch_multiplier = PREFETCH_COUNT task_acks_on_failure_or_timeout = True task_reject_on_worker_lost = True -task_track_started = True task_time_limit = CELERY_TASK_TIMEOUT +task_create_missing_queues = False task_ignore_result = True +result_persistent = False +result_expires = 60 +task_default_priority = 5 +task_default_queue = "poll" +broker_transport_options = { + "priority_steps": list(range(10)), + "sep": ":", + "queue_order_strategy": "priority", +} +task_queues = ( + Queue("traps", exchange="traps"), + Queue("poll", exchange="poll"), + Queue("send", exchange="send"), +) diff --git a/splunk_connect_for_snmp/common/inventory_record.py b/splunk_connect_for_snmp/common/inventory_record.py index 09f4f700e..0bc3bdd31 100644 --- a/splunk_connect_for_snmp/common/inventory_record.py +++ b/splunk_connect_for_snmp/common/inventory_record.py @@ -26,6 +26,11 @@ InventoryInt = Union[None, int] InventoryBool = Union[None, bool] +ALTERNATIVE_FIELDS = { + "securityEngine": "security_engine", + "SmartProfiles": "smart_profiles", +} + class InventoryRecord(BaseModel): address: InventoryStr @@ -33,12 +38,19 @@ class InventoryRecord(BaseModel): version: InventoryStr community: InventoryStr secret: InventoryStr - securityEngine: InventoryStr = "" + security_engine: InventoryStr = "" walk_interval: InventoryInt = 42000 profiles: List - SmartProfiles: InventoryBool + smart_profiles: InventoryBool delete: InventoryBool + def __init__(self, *args, **kwargs): + for old, current in ALTERNATIVE_FIELDS.items(): + if old in kwargs.keys(): + kwargs[current] = kwargs.get(old) + kwargs.pop(old, None) + super().__init__(*args, 
**kwargs) + @validator("address", pre=True) def address_validator(cls, value): if value is None: @@ -95,8 +107,8 @@ def secret_validator(cls, value): else: return value - @validator("securityEngine", pre=True) - def securityEngine_validator(cls, value): + @validator("security_engine", pre=True) + def security_engine_validator(cls, value): if value is None or (isinstance(value, str) and value.strip() == ""): return None else: @@ -123,8 +135,8 @@ def profiles_validator(cls, value): else: return value - @validator("SmartProfiles", pre=True) - def SmartProfiles_validator(cls, value): + @validator("smart_profiles", pre=True) + def smart_profiles_validator(cls, value): if value is None or (isinstance(value, str) and value.strip() == ""): return True else: @@ -137,39 +149,5 @@ def delete_validator(cls, value): else: return human_bool(value) - @staticmethod - def from_json(ir_json: str): - ir_dict = json.loads(ir_json) - return InventoryRecord(**ir_dict) - - def to_json(self): - return json.dumps( - { - "address": self.address, - "port": self.port, - "version": self.version, - "community": self.community, - "secret": self.secret, - "securityEngine": self.securityEngine, - "walk_interval": self.walk_interval, - "profiles": self.profiles, - "SmartProfiles": self.SmartProfiles, - "delete": self.delete, - } - ) - - @classmethod - def from_dict(cls, env): - return cls( - **{k: v for k, v in env.items() if k in inspect.signature(cls).parameters} - ) - def asdict(self) -> dict: return self.dict() - - -class InventoryRecordEncoder(json.JSONEncoder): - def default(self, o): - if "tojson" in dir(o): - return o.tojson() - return json.JSONEncoder.default(self, o) diff --git a/splunk_connect_for_snmp/common/profiles.py b/splunk_connect_for_snmp/common/profiles.py index 73facbb38..4b77c4232 100644 --- a/splunk_connect_for_snmp/common/profiles.py +++ b/splunk_connect_for_snmp/common/profiles.py @@ -28,6 +28,7 @@ from celery.utils.log import get_task_logger logger = get_task_logger(__name__) +PROFILES_GET_RETRIES = 3 def load_profiles(): @@ -65,3 +66,73 @@ def load_profiles(): logger.info(f"File: {CONFIG_PATH} not found") return active_profiles + + +class ProfilesManager: + def __init__(self, mongo): + self.mongo = mongo + self.profiles_coll = mongo.sc4snmp.profiles + + def gather_profiles(self): + active_profiles = {} + + pkg_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "profiles" + ) + for file in os.listdir(pkg_path): + if file.endswith("yaml"): + with open(os.path.join(pkg_path, file), encoding="utf-8") as of: + profiles = yaml.safe_load(of) + logger.info( + f"loading {len(profiles.keys())} profiles from shared profile group {file}" + ) + for key, profile in profiles.items(): + active_profiles[key] = profile + + try: + with open(CONFIG_PATH, encoding="utf-8") as file: + config_runtime = yaml.safe_load(file) + if "profiles" in config_runtime: + profiles = config_runtime.get("profiles", {}) + logger.info( + f"loading {len(profiles.keys())} profiles from runtime profile group" + ) + for key, profile in profiles.items(): + if key in active_profiles: + if not profile.get("enabled", True): + logger.info(f"disabling profile {key}") + del active_profiles[key] + else: + active_profiles[key] = profile + else: + active_profiles[key] = profile + except FileNotFoundError: + logger.info(f"File: {CONFIG_PATH} not found") + return active_profiles + + def update_profiles(self, profiles): + profiles_to_insert = [] + for key, value in profiles.items(): + profiles_to_insert.append({key: value}) + with 
self.mongo.start_session() as session:
+            with session.start_transaction():
+                self.profiles_coll.delete_many({})
+                self.profiles_coll.insert_many(profiles_to_insert)
+
+    def update_all_profiles(self):
+        all_profiles = self.gather_profiles()
+        self.update_profiles(all_profiles)
+
+    def return_all_profiles(self):
+        for _ in range(PROFILES_GET_RETRIES):
+            profiles = self.request_profiles()
+            if profiles:
+                return profiles
+        return {}
+
+    def request_profiles(self):
+        profiles = {}
+        profiles_cursor = self.profiles_coll.find({}, {"_id": 0})
+        for item in profiles_cursor:
+            profiles.update(item)
+        return profiles
diff --git a/splunk_connect_for_snmp/common/schema_migration.py b/splunk_connect_for_snmp/common/schema_migration.py
index c1f339f00..4fd1cba4b 100644
--- a/splunk_connect_for_snmp/common/schema_migration.py
+++ b/splunk_connect_for_snmp/common/schema_migration.py
@@ -23,6 +23,9 @@
     CustomisedJSONFormatter,
 )
 
+from ..poller import app
+from .task_generator import WalkTaskGenerator
+
 formatter = CustomisedJSONFormatter()
 
 logger = logging.getLogger(__name__)
@@ -35,7 +38,7 @@
 logger.addHandler(handler)
 
-CURRENT_SCHEMA_VERSION = 3
+CURRENT_SCHEMA_VERSION = 4
 
 MONGO_URI = os.getenv("MONGO_URI")
 
@@ -94,3 +97,26 @@ def migrate_to_version_3(mongo_client, task_manager):
     attributes_collection.create_index(
         [("address", ASCENDING), ("group_key_hash", ASCENDING)]
     )
+
+
+def migrate_to_version_4(mongo_client, task_manager):
+    logger.info("Migrating database schema to version 4")
+    schedules_collection = mongo_client.sc4snmp.schedules
+    transform_mongodb_periodic_to_redbeat(schedules_collection, task_manager)
+    schedules_collection.drop()
+
+
+def transform_mongodb_periodic_to_redbeat(schedule_collection, task_manager):
+    schedules = schedule_collection.find(
+        {"task": "splunk_connect_for_snmp.snmp.tasks.walk"}
+    )
+    for schedule_obj in schedules:
+        walk_interval = schedule_obj.get("interval").get("every")
+        task_generator = WalkTaskGenerator(
+            target=schedule_obj.get("target"),
+            schedule_period=walk_interval,
+            app=app,
+            profile=schedule_obj.get("kwargs").get("profile"),
+        )
+        walk_data = task_generator.generate_task_definition()
+        task_manager.manage_task(**walk_data)
diff --git a/splunk_connect_for_snmp/common/task_generator.py b/splunk_connect_for_snmp/common/task_generator.py
new file mode 100644
index 000000000..0fff03ee4
--- /dev/null
+++ b/splunk_connect_for_snmp/common/task_generator.py
@@ -0,0 +1,103 @@
+from celery import Celery, chain, group, signature
+from celery.schedules import schedule
+
+
+class TaskGenerator:
+    def __init__(self, target: str, schedule_period: int, app: Celery):
+        self.target = target
+        self.schedule_period = schedule_period
+        self.app = app
+
+    def generate_task_definition(self):
+        return {
+            "target": self.target,
+            "args": [],
+            "kwargs": {"address": self.target},
+            "schedule": schedule(self.schedule_period),
+            "enabled": True,
+            "app": self.app,
+        }
+
+
+class WalkTaskGenerator(TaskGenerator):
+
+    WALK_CHAIN_OF_TASKS = {
+        "link": chain(
+            signature("splunk_connect_for_snmp.enrich.tasks.enrich")
+            .set(queue="poll")
+            .set(priority=4),
+            group(
+                signature(
+                    "splunk_connect_for_snmp.inventory.tasks.inventory_setup_poller"
+                )
+                .set(queue="poll")
+                .set(priority=3),
+                chain(
+                    signature("splunk_connect_for_snmp.splunk.tasks.prepare")
+                    .set(queue="send")
+                    .set(priority=1),
+                    signature("splunk_connect_for_snmp.splunk.tasks.send")
+                    .set(queue="send")
+                    .set(priority=0),
+                ),
+            ),
+        ),
+    }
+
+    def __init__(self, target, schedule_period, app, profile):
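+        # 'profile' is forwarded into the walk task's kwargs; when set, the walk
+        # is limited to that profile (see snmp/tasks.py). Walk results then flow
+        # through the enrich -> (inventory_setup_poller | prepare -> send)
+        # pipeline declared in WALK_CHAIN_OF_TASKS above.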
super().__init__(target, schedule_period, app) + self.profile = profile + + def generate_task_definition(self): + task_data = super().generate_task_definition() + name = f"sc4snmp;{self.target};walk" + task_data["name"] = name + task_data["task"] = "splunk_connect_for_snmp.snmp.tasks.walk" + task_data["options"] = self.WALK_CHAIN_OF_TASKS + task_data["run_immediately"] = True + walk_kwargs = {"profile": self.profile} + task_data["kwargs"].update(walk_kwargs) + return task_data + + +class PollTaskGenerator(TaskGenerator): + + POLL_CHAIN_OF_TASKS = { + "link": chain( + signature("splunk_connect_for_snmp.enrich.tasks.enrich") + .set(queue="poll") + .set(priority=4), + chain( + signature("splunk_connect_for_snmp.splunk.tasks.prepare") + .set(queue="send") + .set(priority=1), + signature("splunk_connect_for_snmp.splunk.tasks.send") + .set(queue="send") + .set(priority=0), + ), + ), + } + + def __init__(self, target, schedule_period, app, profiles): + super().__init__(target, schedule_period, app) + self.profiles = profiles + + def generate_task_definition(self): + task_data = super().generate_task_definition() + task_data["name"] = f"sc4snmp;{self.target};{self.schedule_period};poll" + task_data["task"] = "splunk_connect_for_snmp.snmp.tasks.poll" + task_data["options"] = self.POLL_CHAIN_OF_TASKS + task_data["run_immediately"] = self.run_immediately + poll_kwargs = { + "profiles": list(self.profiles), + "frequency": self.schedule_period, + "priority": 2, + } + task_data["kwargs"].update(poll_kwargs) + return task_data + + @property + def run_immediately(self): + if self.schedule_period > 300: + return True + return False diff --git a/splunk_connect_for_snmp/customtaskmanager.py b/splunk_connect_for_snmp/customtaskmanager.py index c73973e30..c9af82f72 100644 --- a/splunk_connect_for_snmp/customtaskmanager.py +++ b/splunk_connect_for_snmp/customtaskmanager.py @@ -16,142 +16,73 @@ import logging from typing import List -from celerybeatmongo.models import PeriodicTask -from mongoengine.connection import connect, disconnect +from redbeat.schedulers import RedBeatSchedulerEntry -import splunk_connect_for_snmp.celery_config +from .poller import app logger = logging.getLogger(__name__) class CustomPeriodicTaskManager: - def __init__(self): - connect( - host=splunk_connect_for_snmp.celery_config.mongodb_scheduler_url, - db=splunk_connect_for_snmp.celery_config.mongodb_scheduler_db, - ) - - def __del__(self): - disconnect() - def delete_unused_poll_tasks(self, target: str, activeschedules: List[str]): - periodic = PeriodicTask.objects(target=target) - for p in periodic: - if not p.task == "splunk_connect_for_snmp.snmp.tasks.poll": + periodic_tasks = RedBeatSchedulerEntry.get_schedules_by_target(target, app=app) + for periodic_document in periodic_tasks: + if not periodic_document.task == "splunk_connect_for_snmp.snmp.tasks.poll": continue - logger.debug(p) - periodic_document = periodic.get(name=p.name) - logger.debug("Got Schedule") - if p.name not in activeschedules: - if periodic_document.enabled: - periodic_document.enabled = False - logger.debug(f"Deleting Schedule: {periodic_document.name}") + logger.debug(f"Got Schedule: {periodic_document.name}") + if periodic_document.name not in activeschedules: + periodic_document.delete() + logger.debug( + f"Deleting Schedule: {periodic_document.name} delete_unused_poll_tasks" + ) def delete_all_poll_tasks(self): - periodic = PeriodicTask.objects() - for p in periodic: - if not p.task == "splunk_connect_for_snmp.snmp.tasks.poll": + periodic_tasks = 
RedBeatSchedulerEntry.get_schedules() + for periodic_document in periodic_tasks: + if not periodic_document.task == "splunk_connect_for_snmp.snmp.tasks.poll": continue - logger.debug(p) - periodic_document = periodic.get(name=p.name) - logger.debug("Got Schedule") + logger.debug(f"Got Schedule: {periodic_document.name}") periodic_document.delete() - logger.debug("Deleting Schedule") + logger.debug( + f"Deleting Schedule {periodic_document.name} delete_all_poll_tasks" + ) def rerun_all_walks(self): - periodic = PeriodicTask.objects() - for p in periodic: - if not p.task == "splunk_connect_for_snmp.snmp.tasks.walk": + periodic_tasks = RedBeatSchedulerEntry.get_schedules() + for periodic_document in periodic_tasks: + if not periodic_document.task == "splunk_connect_for_snmp.snmp.tasks.walk": continue - logger.debug(p) - periodic_document = periodic.get(name=p.name) - periodic_document.run_immediately = True + periodic_document.set_run_immediately(True) logger.debug("Got Schedule") periodic_document.save() + periodic_document.reschedule() - def delete_disabled_poll_tasks(self): - periodic = PeriodicTask.objects(enabled=False) - for p in periodic: - periodic_document = periodic.get(name=p.name) + def delete_all_tasks_of_host(self, target): + periodic_tasks = RedBeatSchedulerEntry.get_schedules_by_target(target, app=app) + for periodic_document in periodic_tasks: periodic_document.delete() - logger.debug("Deleting Schedule") - - def enable_tasks(self, target): - periodic = PeriodicTask.objects(target=target) - for p in periodic: - periodic_document = periodic.get(name=p.name) - if not periodic_document.enabled: - periodic_document.enabled = True - periodic_document.save() - - def disable_tasks(self, target): - periodic = PeriodicTask.objects(target=target) - for p in periodic: - periodic_document = periodic.get(name=p.name) - if periodic_document.enabled: - periodic_document.enabled = False - periodic_document.save() - - def manage_task(self, run_immediately_if_new: bool = False, **task_data) -> None: - periodic = PeriodicTask.objects(name=task_data["name"]) - if periodic: - logger.debug("Existing Schedule") - isChanged = False - periodic_document = periodic.get(name=task_data["name"]) - for key, value in task_data.items(): - if key == "interval": - if not periodic_document.interval == PeriodicTask.Interval( - **task_data["interval"] - ): - periodic_document.interval = PeriodicTask.Interval( - **task_data["interval"] - ) - isChanged = True - elif key == "crontab": - if not periodic_document.crontab == PeriodicTask.Crontab( - **task_data["crontab"] - ): - periodic_document.crontab = PeriodicTask.Crontab( - **task_data["crontab"] - ) - isChanged = True - elif key == "target": - pass - elif key == "total_run_count": - periodic_document[key] = task_data[key] - else: - if key in periodic_document: - if not periodic_document[key] == task_data[key]: - periodic_document[key] = task_data[key] - isChanged = True - else: - periodic_document[key] = task_data[key] - isChanged = True - else: - logger.debug("New Schedule") - isChanged = True - periodic_document = PeriodicTask(task=task_data["task"]) - periodic_document.name = task_data["name"] - periodic_document.args = task_data["args"] - periodic_document.kwargs = task_data["kwargs"] - if "interval" in task_data: - periodic_document.interval = PeriodicTask.Interval( - **task_data["interval"] - ) - else: - periodic_document.crontab = PeriodicTask.Crontab(**task_data["crontab"]) - periodic_document.enabled = task_data["enabled"] - 
periodic_document.run_immediately = task_data.get( - "run_immediately", run_immediately_if_new + def manage_task(self, **task_data) -> None: + task_name = task_data.get("name") + # When task is updated, we don't want to change existing schedules. + # If task interval is very long, running walk process in between would result in calculating + # next execution again. + try: + periodic_document = RedBeatSchedulerEntry.from_key( + f"redbeat:{task_name}", app=app ) - if "total_run_count" in task_data: - periodic_document["total_run_count"] = task_data["total_run_count"] - if "target" in task_data: - periodic_document["target"] = task_data["target"] - if "options" in task_data: - periodic_document["options"] = task_data["options"] - - logger.info(f"Periodic document to save: {periodic_document.to_json()}") - if isChanged: - periodic_document.save() + args_list = [ + "target", + "args", + "kwargs", + "run_immediately", + "schedule", + "enabled", + ] + for arg in args_list: + if arg in task_data: + setattr(periodic_document, arg, task_data.get(arg)) + except KeyError: + logger.info(f"Setting up a new task: {task_name}") + periodic_document = RedBeatSchedulerEntry(**task_data) + periodic_document.save() diff --git a/splunk_connect_for_snmp/inventory/loader.py b/splunk_connect_for_snmp/inventory/loader.py index 5b8bd62dc..cefd647ac 100644 --- a/splunk_connect_for_snmp/inventory/loader.py +++ b/splunk_connect_for_snmp/inventory/loader.py @@ -20,15 +20,18 @@ from csv import DictReader import pymongo -from celery.canvas import chain, group, signature +import yaml from splunk_connect_for_snmp import customtaskmanager from splunk_connect_for_snmp.common.customised_json_formatter import ( CustomisedJSONFormatter, ) from splunk_connect_for_snmp.common.inventory_record import InventoryRecord -from splunk_connect_for_snmp.common.profiles import load_profiles +from splunk_connect_for_snmp.common.profiles import ProfilesManager from splunk_connect_for_snmp.common.schema_migration import migrate_database +from splunk_connect_for_snmp.common.task_generator import WalkTaskGenerator + +from ..poller import app try: from dotenv import load_dotenv @@ -64,34 +67,11 @@ def transform_address_to_key(address, port): def gen_walk_task(ir: InventoryRecord, profile=None): target = transform_address_to_key(ir.address, ir.port) - return { - "name": f"sc4snmp;{target};walk", - "task": "splunk_connect_for_snmp.snmp.tasks.walk", - "target": target, - "args": [], - "kwargs": { - "address": target, - "profile": profile, - }, - "options": { - "link": chain( - signature("splunk_connect_for_snmp.enrich.tasks.enrich"), - group( - signature( - "splunk_connect_for_snmp.inventory.tasks.inventory_setup_poller" - ), - chain( - signature("splunk_connect_for_snmp.splunk.tasks.prepare"), - signature("splunk_connect_for_snmp.splunk.tasks.send"), - ), - ), - ), - }, - "interval": {"every": ir.walk_interval, "period": "seconds"}, - "enabled": True, - "total_run_count": 0, - "run_immediately": True, - } + walk_definition = WalkTaskGenerator( + target=target, schedule_period=ir.walk_interval, app=app, profile=profile + ) + task_config = walk_definition.generate_task_definition() + return task_config def load(): @@ -100,13 +80,15 @@ def load(): mongo_client = pymongo.MongoClient(MONGO_URI) targets_collection = mongo_client.sc4snmp.targets attributes_collection = mongo_client.sc4snmp.attributes + profiles_manager = ProfilesManager(mongo_client) mongo_db = mongo_client[MONGO_DB] inventory_records = mongo_db.inventory periodic_obj = 
customtaskmanager.CustomPeriodicTaskManager() migrate_database(mongo_client, periodic_obj) - config_profiles = load_profiles() + profiles_manager.update_all_profiles() + config_profiles = profiles_manager.return_all_profiles() logger.info(f"Loading inventory from {path}") with open(path, encoding="utf-8") as csv_file: @@ -121,7 +103,7 @@ def load(): ir = InventoryRecord(**source_record) target = transform_address_to_key(ir.address, ir.port) if ir.delete: - periodic_obj.disable_tasks(target) + periodic_obj.delete_all_tasks_of_host(target) inventory_records.delete_one( {"address": ir.address, "port": ir.port} ) @@ -129,11 +111,6 @@ def load(): attributes_collection.remove({"address": target}) logger.info(f"Deleting record: {target}") else: - status = inventory_records.update_one( - {"address": ir.address, "port": ir.port}, - {"$set": ir.asdict()}, - upsert=True, - ) profiles = source_record["profiles"].split(";") profile = None if profiles: @@ -147,7 +124,12 @@ def load(): ] if profiles: profile = profiles[-1] - ir.walk_interval = source_record["walk_interval"] + ir.walk_interval = int(source_record["walk_interval"]) + status = inventory_records.update_one( + {"address": ir.address, "port": ir.port}, + {"$set": ir.asdict()}, + upsert=True, + ) if status.matched_count == 0: logger.info(f"New Record {ir} {status.upserted_id}") elif status.modified_count == 1 and status.upserted_id is None: diff --git a/splunk_connect_for_snmp/inventory/tasks.py b/splunk_connect_for_snmp/inventory/tasks.py index 3c23f0702..564f4c366 100644 --- a/splunk_connect_for_snmp/inventory/tasks.py +++ b/splunk_connect_for_snmp/inventory/tasks.py @@ -13,12 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import time import typing -from splunk_connect_for_snmp.common.profiles import load_profiles +from splunk_connect_for_snmp.common.profiles import ProfilesManager from splunk_connect_for_snmp.snmp.manager import get_inventory +from ..common.task_generator import PollTaskGenerator +from .loader import transform_address_to_key + try: from dotenv import load_dotenv @@ -31,12 +33,13 @@ import pymongo import urllib3 from celery import Task, shared_task -from celery.canvas import chain, signature from celery.utils.log import get_task_logger from splunk_connect_for_snmp import customtaskmanager from splunk_connect_for_snmp.common.hummanbool import human_bool +from ..poller import app + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # nosemgrep logger = get_task_logger(__name__) @@ -49,22 +52,20 @@ class InventoryTask(Task): def __init__(self): - self.profiles = load_profiles() - self.last_modified = time.time() + self.mongo_client = pymongo.MongoClient(MONGO_URI) + self.profiles_manager = ProfilesManager(self.mongo_client) + self.profiles = self.profiles_manager.return_all_profiles() @shared_task(bind=True, base=InventoryTask) def inventory_setup_poller(self, work): address = work["address"] - if time.time() - self.last_modified > PROFILES_RELOAD_DELAY: - self.profiles = load_profiles() - self.last_modified = time.time() - logger.debug("Profiles reloaded") + self.profiles = self.profiles_manager.return_all_profiles() + logger.debug("Profiles reloaded") periodic_obj = customtaskmanager.CustomPeriodicTaskManager() - mongo_client = pymongo.MongoClient(MONGO_URI) - mongo_db = mongo_client[MONGO_DB] + mongo_db = self.mongo_client[MONGO_DB] mongo_inventory = mongo_db.inventory targets_collection = mongo_db.targets @@ -85,45 +86,23 @@ def 
inventory_setup_poller(self, work):
         periodic_obj.manage_task(**task_config)
 
     periodic_obj.delete_unused_poll_tasks(f"{address}", active_schedules)
-    periodic_obj.delete_disabled_poll_tasks()
+    # periodic_obj.delete_disabled_poll_tasks()
 
 
 def generate_poll_task_definition(active_schedules, address, assigned_profiles, period):
-    run_immediately: bool = False
-    if period > 300:
-        run_immediately = True
-
-    name = f"sc4snmp;{address};{period};poll"
     period_profiles = set(assigned_profiles[period])
-    active_schedules.append(name)
-
-    task_config = {
-        "name": name,
-        "task": "splunk_connect_for_snmp.snmp.tasks.poll",
-        "target": f"{address}",
-        "args": [],
-        "kwargs": {
-            "address": address,
-            "profiles": period_profiles,
-            "frequency": period,
-        },
-        "options": {
-            "link": chain(
-                signature("splunk_connect_for_snmp.enrich.tasks.enrich"),
-                chain(
-                    signature("splunk_connect_for_snmp.splunk.tasks.prepare"),
-                    signature("splunk_connect_for_snmp.splunk.tasks.send"),
-                ),
-            ),
-        },
-        "interval": {"every": period, "period": "seconds"},
-        "enabled": True,
-        "run_immediately": run_immediately,
-    }
+    poll_definition = PollTaskGenerator(
+        target=address, schedule_period=period, app=app, profiles=list(period_profiles)
+    )
+    task_config = poll_definition.generate_task_definition()
+    active_schedules.append(task_config.get("name"))
     return task_config
 
 
 def assign_profiles(ir, profiles, target):
     assigned_profiles: dict[int, list[str]] = {}
-    if ir.SmartProfiles:
+    address = transform_address_to_key(ir.address, ir.port)
+    if ir.smart_profiles:
         for profile_name, profile in profiles.items():
 
             if not is_smart_profile_valid(profile_name, profile):
@@ -163,13 +142,26 @@ def assign_profiles(ir, profiles, target):
     for profile_name in ir.profiles:
         if profile_name in profiles:
             profile = profiles[profile_name]
+            if "condition" in profile:
+                if profile["condition"].get("type") == "walk":
+                    logger.warning(
+                        f"profile {profile_name} is a walk profile; it cannot be used as a static profile"
+                    )
+                    continue
+                logger.warning(
+                    f"profile {profile_name} is a smart profile; it does not need to be configured as a static one"
+                )
             if "frequency" not in profile:
                 logger.warning(f"profile {profile_name} does not have frequency")
                 continue
             if profile["frequency"] not in assigned_profiles:
                 assigned_profiles[profile["frequency"]] = []
             assigned_profiles[profile["frequency"]].append(profile_name)
-    logger.debug(f"Profiles Assigned {assigned_profiles}")
+        else:
+            logger.warning(
+                f"profile {profile_name} was assigned to host {address}, but no such profile exists in the config"
+            )
+    logger.debug(f"Profiles Assigned for host {address}: {assigned_profiles}")
     return assigned_profiles
diff --git a/splunk_connect_for_snmp/poller.py b/splunk_connect_for_snmp/poller.py
index 94b586895..edb3838b8 100644
--- a/splunk_connect_for_snmp/poller.py
+++ b/splunk_connect_for_snmp/poller.py
@@ -31,15 +31,17 @@
 from celery import Celery, signals
 from celery.utils.log import get_task_logger
 from opentelemetry import trace
-from opentelemetry.exporter.jaeger.thrift import JaegerExporter
+
+# from opentelemetry.exporter.jaeger.thrift import JaegerExporter
 from opentelemetry.instrumentation.celery import CeleryInstrumentor
 from opentelemetry.instrumentation.logging import LoggingInstrumentor
 from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+# from opentelemetry.sdk.trace.export import BatchSpanProcessor
 
 provider = TracerProvider()
-processor = BatchSpanProcessor(JaegerExporter())
-provider.add_span_processor(processor) +# processor = BatchSpanProcessor(JaegerExporter()) +# provider.add_span_processor(processor) trace.set_tracer_provider(provider) formatter = CustomisedJSONFormatter() diff --git a/splunk_connect_for_snmp/snmp/auth.py b/splunk_connect_for_snmp/snmp/auth.py index 5efcf9966..e7567df65 100644 --- a/splunk_connect_for_snmp/snmp/auth.py +++ b/splunk_connect_for_snmp/snmp/auth.py @@ -117,12 +117,12 @@ def getAuthV3(logger, ir: InventoryRecord, snmpEngine: SnmpEngine) -> UsmUserDat get_secret_value(location, "privKeyType", required=False, default="0") ) if ( - isinstance(ir.securityEngine, str) - and ir.securityEngine != "" - and not ir.securityEngine.isdigit() + isinstance(ir.security_engine, str) + and ir.security_engine != "" + and not ir.security_engine.isdigit() ): - securityEngineId = ir.securityEngine - logger.debug(f"Security eng from profile {ir.securityEngine}") + securityEngineId = ir.security_engine + logger.debug(f"Security eng from profile {ir.security_engine}") else: securityEngineId = get_security_engine_id(logger, ir, snmpEngine) logger.debug(f"Security eng dynamic {securityEngineId}") diff --git a/splunk_connect_for_snmp/snmp/manager.py b/splunk_connect_for_snmp/snmp/manager.py index cc4b0dd0c..69ad9b088 100644 --- a/splunk_connect_for_snmp/snmp/manager.py +++ b/splunk_connect_for_snmp/snmp/manager.py @@ -31,7 +31,7 @@ import os import time from io import StringIO -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Tuple, Union import pymongo from celery import Task @@ -43,7 +43,7 @@ from splunk_connect_for_snmp.common.hummanbool import human_bool from splunk_connect_for_snmp.common.inventory_record import InventoryRecord -from splunk_connect_for_snmp.common.profiles import load_profiles +from splunk_connect_for_snmp.common.profiles import ProfilesManager from splunk_connect_for_snmp.common.requests import CachedLimiterSession from splunk_connect_for_snmp.snmp.auth import GetAuth from splunk_connect_for_snmp.snmp.context import get_context_data @@ -59,7 +59,7 @@ MONGO_DB = os.getenv("MONGO_DB", "sc4snmp") IGNORE_EMPTY_VARBINDS = human_bool(os.getenv("IGNORE_EMPTY_VARBINDS", False)) CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") -PROFILES_RELOAD_DELAY = int(os.getenv("PROFILES_RELOAD_DELAY", "300")) +PROFILES_RELOAD_DELAY = int(os.getenv("PROFILES_RELOAD_DELAY", "60")) UDP_CONNECTION_TIMEOUT = int(os.getenv("UDP_CONNECTION_TIMEOUT", 3)) DEFAULT_STANDARD_MIBS = [ @@ -234,9 +234,11 @@ def __init__(self, **kwargs): allowable_codes=[200], ) - self.profiles = load_profiles() + self.profiles_manager = ProfilesManager(self.mongo_client) + self.profiles = self.profiles_manager.return_all_profiles() self.last_modified = time.time() self.snmpEngine = SnmpEngine() + self.already_loaded_mibs = set() self.builder = self.snmpEngine.getMibBuilder() self.mib_view_controller = view.MibViewController(self.builder) compiler.addMibCompiler(self.builder, sources=[MIB_SOURCES]) @@ -280,8 +282,8 @@ def do_work( retry = False address = transform_address_to_key(ir.address, ir.port) - if time.time() - self.last_modified > PROFILES_RELOAD_DELAY: - self.profiles = load_profiles() + if time.time() - self.last_modified > PROFILES_RELOAD_DELAY or walk: + self.profiles = self.profiles_manager.return_all_profiles() self.last_modified = time.time() logger.debug("Profiles reloaded") @@ -359,10 +361,10 @@ def load_mibs(self, mibs: List[str]) -> None: if mib: try: self.builder.loadModules(mib) - except error.MibLoadError as e: - 
logger.exception(f"Error loading mib for {mib}, {e}") + except Exception as e: + logger.warning(f"Error loading mib for {mib}, {e}") - def is_mib_known(self, id: str, oid: str, target: str) -> tuple[bool, str]: + def is_mib_known(self, id: str, oid: str, target: str) -> Tuple[bool, str]: oid_list = tuple(oid.split(".")) @@ -393,6 +395,10 @@ def get_var_binds(self, address, walk=False, profiles=[]): # First pass we only look at profiles for a full mib walk for profile in profiles: + # In case scheduler processes doesn't yet updated profiles information + if profile not in self.profiles: + self.profiles = self.profiles_manager.return_all_profiles() + self.last_modified = time.time() # Its possible a profile is removed on upgrade but schedule doesn't yet know if profile in self.profiles and "varBinds" in self.profiles[profile]: profile_spec = self.profiles[profile] @@ -405,6 +411,11 @@ def get_var_binds(self, address, walk=False, profiles=[]): bulk_mapping[f"{vb[0]}"] = profile if vb[0] not in needed_mibs: needed_mibs.append(vb[0]) + else: + logger.warning( + f"There is either profile: {profile} missing from the configuration, or varBinds section not" + f"present inside the profile" + ) for profile in profiles: # Its possible a profile is removed on upgrade but schedule doesn't yet know diff --git a/splunk_connect_for_snmp/snmp/tasks.py b/splunk_connect_for_snmp/snmp/tasks.py index fcdf5db4d..190466de7 100644 --- a/splunk_connect_for_snmp/snmp/tasks.py +++ b/splunk_connect_for_snmp/snmp/tasks.py @@ -43,6 +43,7 @@ MONGO_URI = os.getenv("MONGO_URI") MONGO_DB = os.getenv("MONGO_DB", "sc4snmp") CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") +WALK_RETRY_MAX_INTERVAL = int(os.getenv("WALK_RETRY_MAX_INTERVAL", "600")) OID_VALIDATOR = re.compile(r"^([0-2])((\.0)|(\.[1-9][0-9]*))*$") @@ -50,8 +51,7 @@ bind=True, base=Poller, retry_backoff=30, - retry_jitter=True, - retry_backoff_max=3600, + retry_backoff_max=WALK_RETRY_MAX_INTERVAL, max_retries=50, autoretry_for=( MongoLockLocked, @@ -63,7 +63,6 @@ ), ) def walk(self, **kwargs): - address = kwargs["address"] profile = kwargs.get("profile", []) if profile: @@ -129,7 +128,6 @@ def trap(self, work): var_bind_table = [] not_translated_oids = [] remaining_oids = [] - oid_values = set() remotemibs = set() metrics = {} for w in work["data"]: @@ -137,9 +135,9 @@ def trap(self, work): if OID_VALIDATOR.match(w[1]): with suppress(Exception): found, mib = self.is_mib_known(w[1], w[1], work["host"]) - if found and mib not in oid_values: + if found and mib not in self.already_loaded_mibs: self.load_mibs([mib]) - oid_values.add(mib) + self.already_loaded_mibs.add(mib) try: var_bind_table.append( @@ -152,12 +150,13 @@ def trap(self, work): for oid in not_translated_oids: found, mib = self.is_mib_known(oid[0], oid[0], work["host"]) - if found: + if found and mib not in self.already_loaded_mibs: remotemibs.add(mib) remaining_oids.append((oid[0], oid[1])) if remotemibs: self.load_mibs(remotemibs) + self.already_loaded_mibs.update(remotemibs) for w in remaining_oids: try: var_bind_table.append( diff --git a/splunk_connect_for_snmp/traps.py b/splunk_connect_for_snmp/traps.py index 1257fc5bc..06285d9ca 100644 --- a/splunk_connect_for_snmp/traps.py +++ b/splunk_connect_for_snmp/traps.py @@ -35,11 +35,13 @@ import yaml from celery import Celery, chain, signals from opentelemetry import trace -from opentelemetry.exporter.jaeger.thrift import JaegerExporter + +# from opentelemetry.exporter.jaeger.thrift import JaegerExporter from 
opentelemetry.instrumentation.celery import CeleryInstrumentor from opentelemetry.instrumentation.logging import LoggingInstrumentor from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor + +# from opentelemetry.sdk.trace.export import BatchSpanProcessor from pysnmp.carrier.asyncio.dgram import udp from pysnmp.entity import config, engine from pysnmp.entity.rfc3413 import ntfrcv @@ -49,8 +51,6 @@ from splunk_connect_for_snmp.splunk.tasks import prepare, send provider = TracerProvider() -processor = BatchSpanProcessor(JaegerExporter()) -provider.add_span_processor(processor) trace.set_tracer_provider(provider) CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") @@ -114,7 +114,9 @@ def cbFun(snmpEngine, stateReference, contextEngineId, contextName, varBinds, cb work = {"data": data, "host": device_ip} my_chain = chain( - trap_task_signature(work), prepare_task_signature(), send_task_signature() + trap_task_signature(work).set(queue="traps").set(priority=5), + prepare_task_signature().set(queue="send").set(priority=1), + send_task_signature().set(queue="send").set(priority=0), ) _ = my_chain.apply_async() diff --git a/test/common/test_inventory_record.py b/test/common/test_inventory_record.py index be44d8527..c95af59d0 100644 --- a/test/common/test_inventory_record.py +++ b/test/common/test_inventory_record.py @@ -38,7 +38,7 @@ def test_port_too_high(self): "port": 65537, "version": "2c", "walk_interval": 1850, - "SmartProfiles": True, + "smart_profiles": True, "delete": "", } @@ -46,7 +46,195 @@ def test_port_too_high(self): InventoryRecord(**ir_dict) self.assertEqual("Port out of range 65537", e.exception.args[0][0].exc.args[0]) + def test_port_not_specified(self): + ir_dict = { + "address": "192.168.0.1", + "port": "", + "version": None, + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + ir = InventoryRecord(**ir_dict) + self.assertEqual(161, ir.port) + def test_version_none(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": None, + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + + self.assertEqual("2c", ir.version) + + def test_version_out_of_range(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "5a", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + with self.assertRaises(ValueError) as e: + InventoryRecord(**ir_dict) + self.assertEqual( + "version out of range 5a accepted is 1 or 2c or 3", + e.exception.args[0][0].exc.args[0], + ) + + def test_empty_community(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + self.assertIsNone(ir.community) + + def test_empty_walk_interval(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": None, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + ir = 
InventoryRecord(**ir_dict) + self.assertEqual(42000, ir.walk_interval) + + def test_too_low_walk_interval(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 20, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + self.assertEqual(1800, ir.walk_interval) + + def test_too_high_walk_interval(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 50000, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + self.assertEqual(42000, ir.walk_interval) + + def test_profiles_not_string(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": [], + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + self.assertEqual([], ir.profiles) + + def test_smart_profiles_empty(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + self.assertTrue(ir.smart_profiles) + + def test_delete_empty(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "smart_profiles": True, + "delete": "", + } + + ir = InventoryRecord(**ir_dict) + self.assertFalse(ir.delete) + + def test_port_too_high_camel_case(self): + ir_dict = { + "address": "192.168.0.1", + "port": 65537, + "version": "2c", + "walk_interval": 1850, + "SmartProfiles": True, + "delete": "", + } + + with self.assertRaises(ValueError) as e: + InventoryRecord(**ir_dict) + self.assertEqual("Port out of range 65537", e.exception.args[0][0].exc.args[0]) + + def test_version_none_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -64,7 +252,7 @@ def test_version_none(self): self.assertEqual("2c", ir.version) - def test_version_out_of_range(self): + def test_version_out_of_range_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -85,7 +273,7 @@ def test_version_out_of_range(self): e.exception.args[0][0].exc.args[0], ) - def test_empty_community(self): + def test_empty_community_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -102,7 +290,7 @@ def test_empty_community(self): ir = InventoryRecord(**ir_dict) self.assertIsNone(ir.community) - def test_empty_walk_interval(self): + def test_empty_walk_interval_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -119,7 +307,7 @@ def test_empty_walk_interval(self): ir = InventoryRecord(**ir_dict) self.assertEqual(42000, ir.walk_interval) - def test_too_low_walk_interval(self): + def test_too_low_walk_interval_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -136,7 +324,7 @@ def test_too_low_walk_interval(self): ir = InventoryRecord(**ir_dict) self.assertEqual(1800, ir.walk_interval) - def test_too_high_walk_interval(self): + def test_too_high_walk_interval_camel_case(self): ir_dict = { "address": "192.168.0.1", 
"port": "34", @@ -153,7 +341,7 @@ def test_too_high_walk_interval(self): ir = InventoryRecord(**ir_dict) self.assertEqual(42000, ir.walk_interval) - def test_profiles_not_string(self): + def test_profiles_not_string_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -170,7 +358,7 @@ def test_profiles_not_string(self): ir = InventoryRecord(**ir_dict) self.assertEqual([], ir.profiles) - def test_smart_profiles_empty(self): + def test_smart_profiles_empty_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -185,9 +373,9 @@ def test_smart_profiles_empty(self): } ir = InventoryRecord(**ir_dict) - self.assertTrue(ir.SmartProfiles) + self.assertTrue(ir.smart_profiles) - def test_delete_empty(self): + def test_delete_empty_camel_case(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -204,25 +392,41 @@ def test_delete_empty(self): ir = InventoryRecord(**ir_dict) self.assertFalse(ir.delete) - def test_from_json(self): - ir = InventoryRecord.from_json( - '{"address": "192.168.0.1", "port": "34", "version": "3", "community": ' - '"public", "secret": "secret", "securityEngine": "ENGINE", "walk_interval": ' - '1850, "profiles": "", "SmartProfiles": true, "delete": ""}' - ) + def test_secret_not_specified(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "", + "SmartProfiles": True, + "delete": "", + } - self.assertEqual(ir.address, "192.168.0.1") - self.assertEqual(ir.port, 34) - self.assertEqual(ir.version, "3") - self.assertEqual(ir.community, "public") - self.assertEqual(ir.secret, "secret") - self.assertEqual(ir.securityEngine, "ENGINE") - self.assertEqual(ir.walk_interval, 1850) - self.assertEqual(ir.profiles, []) - self.assertEqual(ir.SmartProfiles, True) - self.assertEqual(ir.delete, False) + ir = InventoryRecord(**ir_dict) + self.assertIsNone(ir.secret) - def test_to_json(self): + def test_security_engine_not_specified(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "", + "securityEngine": "", + "walk_interval": 1850, + "profiles": "", + "SmartProfiles": True, + "delete": "", + } + + ir = InventoryRecord(**ir_dict) + self.assertIsNone(ir.security_engine) + + def test_profiles(self): ir_dict = { "address": "192.168.0.1", "port": "34", @@ -231,16 +435,56 @@ def test_to_json(self): "secret": "secret", "securityEngine": "ENGINE", "walk_interval": 1850, - "profiles": "", + "profiles": "generic_switch;new_profiles", "SmartProfiles": True, "delete": "", } ir = InventoryRecord(**ir_dict) + self.assertEqual(["generic_switch", "new_profiles"], ir.profiles) - self.assertEqual( - '{"address": "192.168.0.1", "port": 34, "version": "3", "community": ' - '"public", "secret": "secret", "securityEngine": "ENGINE", "walk_interval": ' - '1850, "profiles": [], "SmartProfiles": true, "delete": false}', - ir.to_json(), - ) + def test_smart_profiles_not_specified(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": "secret", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "generic_switch;new_profiles", + "SmartProfiles": "", + "delete": "", + } + + ir = InventoryRecord(**ir_dict) + self.assertTrue(ir.smart_profiles) + + def test_asdict_method(self): + ir_dict = { + "address": "192.168.0.1", + "port": "34", + "version": "3", + "community": "public", + "secret": 
"secret", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "generic_switch;new_profiles", + "SmartProfiles": "", + "delete": "", + } + expeced_dict = { + "address": "192.168.0.1", + "port": 34, + "version": "3", + "community": "public", + "secret": "secret", + "security_engine": "ENGINE", + "walk_interval": 1850, + "profiles": ["generic_switch", "new_profiles"], + "smart_profiles": True, + "delete": False, + } + + ir = InventoryRecord(**ir_dict) + self.assertEqual(expeced_dict, ir.asdict()) diff --git a/test/common/test_profiles.py b/test/common/test_profiles.py index 06875b60c..f6d9844c0 100644 --- a/test/common/test_profiles.py +++ b/test/common/test_profiles.py @@ -1,7 +1,8 @@ import os from unittest import TestCase, mock +from unittest.mock import Mock -from splunk_connect_for_snmp.common.profiles import load_profiles +from splunk_connect_for_snmp.common.profiles import ProfilesManager def return_mocked_path(file_name): @@ -47,7 +48,9 @@ class TestProfiles(TestCase): return_config_without_profiles(), ) def test_base_files_not_found(self): - self.assertRaises(FileNotFoundError, load_profiles) + profiles_manager = ProfilesManager(Mock()) + with self.assertRaises(FileNotFoundError): + profiles_manager.gather_profiles() @mock.patch( "splunk_connect_for_snmp.common.profiles.os.listdir", return_yaml_profiles @@ -60,7 +63,8 @@ def test_config_file_not_found(self): with self.assertLogs( "splunk_connect_for_snmp.common.profiles", level="INFO" ) as cm: - load_profiles() + profiles_manager = ProfilesManager(Mock()) + profiles_manager.gather_profiles() self.assertTrue( any( [ @@ -100,7 +104,9 @@ def test_read_base_profiles(self): ], }, } - self.assertEqual(load_profiles(), active_profiles) + profiles_manager = ProfilesManager(Mock()) + profiles = profiles_manager.gather_profiles() + self.assertEqual(profiles, active_profiles) @mock.patch( "splunk_connect_for_snmp.common.profiles.os.listdir", return_yaml_empty_profiles @@ -128,7 +134,9 @@ def test_runtime_profiles(self): ], }, } - self.assertEqual(load_profiles(), active_profiles) + profiles_manager = ProfilesManager(Mock()) + profiles = profiles_manager.gather_profiles() + self.assertEqual(profiles, active_profiles) @mock.patch( "splunk_connect_for_snmp.common.profiles.os.listdir", return_yaml_profiles @@ -175,7 +183,9 @@ def test_all_profiles(self): ], }, } - self.assertEqual(load_profiles(), active_profiles) + profiles_manager = ProfilesManager(Mock()) + profiles = profiles_manager.gather_profiles() + self.assertEqual(profiles, active_profiles) @mock.patch( "splunk_connect_for_snmp.common.profiles.os.listdir", return_yaml_profiles @@ -195,4 +205,6 @@ def test_disabled_profiles(self): ], } } - self.assertEqual(load_profiles(), active_profiles) + profiles_manager = ProfilesManager(Mock()) + profiles = profiles_manager.gather_profiles() + self.assertEqual(profiles, active_profiles) diff --git a/test/common/test_schema_migration.py b/test/common/test_schema_migration.py index 1136295cf..88681ac32 100644 --- a/test/common/test_schema_migration.py +++ b/test/common/test_schema_migration.py @@ -1,12 +1,21 @@ from unittest import TestCase -from unittest.mock import MagicMock, Mock, patch +from unittest.mock import ANY, MagicMock, Mock, patch + +from celery import chain, group, signature +from celery.schedules import schedule +from pymongo import ASCENDING from splunk_connect_for_snmp.common.schema_migration import ( fetch_schema_version, migrate_database, migrate_to_version_1, + migrate_to_version_2, + migrate_to_version_3, + 
migrate_to_version_4, save_schema_version, + transform_mongodb_periodic_to_redbeat, ) +from splunk_connect_for_snmp.common.task_generator import WalkTaskGenerator class TestSchemaMigration(TestCase): @@ -34,6 +43,7 @@ def test_save_schema_version(self): @patch("splunk_connect_for_snmp.common.schema_migration.save_schema_version") @patch("splunk_connect_for_snmp.common.schema_migration.migrate_to_version_2") @patch("splunk_connect_for_snmp.common.schema_migration.migrate_to_version_1") + @patch("splunk_connect_for_snmp.common.schema_migration.CURRENT_SCHEMA_VERSION", 3) def test_migrate_database(self, m_version_1, m_version_2, m_save, m_fetch): mc = MagicMock() m_fetch.return_value = 0 @@ -61,3 +71,188 @@ def test_migrate_to_version_1(self): periodic_obj_mock.delete_all_poll_tasks.assert_called() periodic_obj_mock.rerun_all_walks.assert_called() + + def test_migrate_to_version_2(self): + periodic_obj_mock = Mock() + mc = MagicMock() + migrate_to_version_2(mc, periodic_obj_mock) + + periodic_obj_mock.delete_all_poll_tasks.assert_called() + periodic_obj_mock.rerun_all_walks.assert_called() + mc.sc4snmp.attributes.drop.assert_called() + + def test_migrate_to_version_3(self): + periodic_obj_mock = Mock() + mc = MagicMock() + migrate_to_version_3(mc, periodic_obj_mock) + + mc.sc4snmp.attributes.create_index.assert_called_with( + [("address", ASCENDING), ("group_key_hash", ASCENDING)] + ) + + @patch( + "splunk_connect_for_snmp.common.schema_migration.transform_mongodb_periodic_to_redbeat" + ) + def test_migrate_to_version_4(self, transform_mongodb_periodic_to_redbeat): + periodic_obj_mock = Mock() + mc = MagicMock() + migrate_to_version_4(mc, periodic_obj_mock) + mc.sc4snmp.schedules.drop.assert_called() + + def test_transform_mongodb_periodic_to_redbeat(self): + old_schedules = [ + { + "name": f"sc4snmp;127.0.0.1;walk", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + "target": "127.0.0.1", + "args": [], + "kwargs": { + "address": "127.0.0.1", + "profile": "walk1", + }, + "options": { + "link": chain( + signature("splunk_connect_for_snmp.enrich.tasks.enrich"), + group( + signature( + "splunk_connect_for_snmp.inventory.tasks.inventory_setup_poller" + ), + chain( + signature( + "splunk_connect_for_snmp.splunk.tasks.prepare" + ), + signature("splunk_connect_for_snmp.splunk.tasks.send"), + ), + ), + ), + }, + "interval": {"every": 30, "period": "seconds"}, + "enabled": True, + "total_run_count": 0, + "run_immediately": True, + } + ] + new_schedule = { + "name": f"sc4snmp;127.0.0.1;walk", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + "target": "127.0.0.1", + "args": [], + "kwargs": { + "address": "127.0.0.1", + "profile": "walk1", + }, + "options": WalkTaskGenerator.WALK_CHAIN_OF_TASKS, + "schedule": schedule(30), + "enabled": True, + "run_immediately": True, + "app": ANY, + } + mc = MagicMock() + mc.find.return_value = old_schedules + periodic_obj_mock = Mock() + transform_mongodb_periodic_to_redbeat(mc, periodic_obj_mock) + periodic_obj_mock.manage_task.assert_called_with(**new_schedule) + + def test_transform_mongodb_periodic_to_redbeat_more_than_one_walk(self): + old_schedules = [ + { + "name": f"sc4snmp;127.0.0.1;walk", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + "target": "127.0.0.1", + "args": [], + "kwargs": { + "address": "127.0.0.1", + "profile": "walk1", + }, + "options": { + "link": chain( + signature("splunk_connect_for_snmp.enrich.tasks.enrich"), + group( + signature( + "splunk_connect_for_snmp.inventory.tasks.inventory_setup_poller" + ), + chain( + signature( 
+ "splunk_connect_for_snmp.splunk.tasks.prepare" + ), + signature("splunk_connect_for_snmp.splunk.tasks.send"), + ), + ), + ), + }, + "interval": {"every": 30, "period": "seconds"}, + "enabled": True, + "total_run_count": 0, + "run_immediately": True, + }, + { + "name": f"sc4snmp;127.0.0.2;walk", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + "target": "127.0.0.2", + "args": [], + "kwargs": { + "address": "127.0.0.2", + "profile": None, + }, + "options": { + "link": chain( + signature("splunk_connect_for_snmp.enrich.tasks.enrich"), + group( + signature( + "splunk_connect_for_snmp.inventory.tasks.inventory_setup_poller" + ), + chain( + signature( + "splunk_connect_for_snmp.splunk.tasks.prepare" + ), + signature("splunk_connect_for_snmp.splunk.tasks.send"), + ), + ), + ), + }, + "interval": {"every": 400, "period": "seconds"}, + "enabled": True, + "total_run_count": 0, + "run_immediately": True, + }, + ] + new_schedule = [ + { + "name": f"sc4snmp;127.0.0.1;walk", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + "target": "127.0.0.1", + "args": [], + "kwargs": { + "address": "127.0.0.1", + "profile": "walk1", + }, + "options": WalkTaskGenerator.WALK_CHAIN_OF_TASKS, + "schedule": schedule(30), + "enabled": True, + "run_immediately": True, + "app": ANY, + }, + { + "name": f"sc4snmp;127.0.0.2;walk", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + "target": "127.0.0.2", + "args": [], + "kwargs": { + "address": "127.0.0.2", + "profile": None, + }, + "options": WalkTaskGenerator.WALK_CHAIN_OF_TASKS, + "schedule": schedule(400), + "enabled": True, + "run_immediately": True, + "app": ANY, + }, + ] + mc = MagicMock() + mc.find.return_value = old_schedules + periodic_obj_mock = Mock() + transform_mongodb_periodic_to_redbeat(mc, periodic_obj_mock) + calls = periodic_obj_mock.manage_task.call_args_list + self.assertEqual(len(calls), 2) + self.assertEqual(calls[0].kwargs, new_schedule[0]) + self.assertEqual(calls[1].kwargs, new_schedule[1]) diff --git a/test/common/test_task_generator.py b/test/common/test_task_generator.py new file mode 100644 index 000000000..ab058b635 --- /dev/null +++ b/test/common/test_task_generator.py @@ -0,0 +1,111 @@ +from unittest import TestCase +from unittest.mock import Mock + +from celery.schedules import schedule + +from splunk_connect_for_snmp.common.task_generator import ( + PollTaskGenerator, + WalkTaskGenerator, +) + + +class TestTaskGenerator(TestCase): + def test_walk_generator(self): + app = Mock() + walk_task_generator = WalkTaskGenerator( + target="127.0.0.1", schedule_period=10, app=app, profile=None + ) + generated_task = walk_task_generator.generate_task_definition() + expected_task = { + "app": app, + "args": [], + "enabled": True, + "kwargs": {"address": "127.0.0.1", "profile": None}, + "name": "sc4snmp;127.0.0.1;walk", + "options": WalkTaskGenerator.WALK_CHAIN_OF_TASKS, + "run_immediately": True, + "schedule": schedule(10), + "target": "127.0.0.1", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + } + self.assertEqual(generated_task, expected_task) + + def test_walk_generator_with_profile(self): + app = Mock() + walk_task_generator = WalkTaskGenerator( + target="127.0.0.1", schedule_period=10, app=app, profile="walk1" + ) + generated_task = walk_task_generator.generate_task_definition() + expected_task = { + "app": app, + "args": [], + "enabled": True, + "kwargs": {"address": "127.0.0.1", "profile": "walk1"}, + "name": "sc4snmp;127.0.0.1;walk", + "options": WalkTaskGenerator.WALK_CHAIN_OF_TASKS, + "run_immediately": True, + 
"schedule": schedule(10), + "target": "127.0.0.1", + "task": "splunk_connect_for_snmp.snmp.tasks.walk", + } + self.assertEqual(generated_task, expected_task) + + def test_poll_generator(self): + app = Mock() + poll_task_generator = PollTaskGenerator( + target="127.0.0.1", + schedule_period=10, + app=app, + profiles=["BaseProfile", "IFProfile"], + ) + generated_task = poll_task_generator.generate_task_definition() + expected_task = { + "app": app, + "args": [], + "enabled": True, + "kwargs": { + "address": "127.0.0.1", + "frequency": 10, + "priority": 2, + "profiles": ["BaseProfile", "IFProfile"], + }, + "name": "sc4snmp;127.0.0.1;10;poll", + "options": PollTaskGenerator.POLL_CHAIN_OF_TASKS, + "run_immediately": False, + "schedule": schedule(10), + "target": "127.0.0.1", + "task": "splunk_connect_for_snmp.snmp.tasks.poll", + } + self.assertEqual(generated_task, expected_task) + + def test_poll_generator_with_run_immediately(self): + app = Mock() + poll_task_generator = PollTaskGenerator( + target="127.0.0.1", + schedule_period=500, + app=app, + profiles=["BaseProfile", "IFProfile"], + ) + generated_task = poll_task_generator.generate_task_definition() + expected_task = { + "app": app, + "args": [], + "enabled": True, + "kwargs": { + "address": "127.0.0.1", + "frequency": 500, + "priority": 2, + "profiles": ["BaseProfile", "IFProfile"], + }, + "name": "sc4snmp;127.0.0.1;500;poll", + "options": PollTaskGenerator.POLL_CHAIN_OF_TASKS, + "run_immediately": True, + "schedule": schedule(500), + "target": "127.0.0.1", + "task": "splunk_connect_for_snmp.snmp.tasks.poll", + } + self.assertEqual(generated_task, expected_task) + + def test_global_variables(self): + self.assertTrue(WalkTaskGenerator.WALK_CHAIN_OF_TASKS) + self.assertTrue(PollTaskGenerator.POLL_CHAIN_OF_TASKS) diff --git a/test/inventory/test_assign_profiles.py b/test/inventory/test_assign_profiles.py index 52412315f..959a7e2ef 100644 --- a/test/inventory/test_assign_profiles.py +++ b/test/inventory/test_assign_profiles.py @@ -1,10 +1,9 @@ -from unittest import TestCase +from unittest import TestCase, mock from splunk_connect_for_snmp.common.inventory_record import InventoryRecord -from splunk_connect_for_snmp.inventory.tasks import assign_profiles -ir_smart = InventoryRecord.from_dict( - { +ir_smart = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -30,16 +29,21 @@ } +@mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" +) class TestProfilesAssignment(TestCase): - def test_assignment_of_static_profiles(self): + def test_assignment_of_static_profiles(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + profiles = { "profile1": {"frequency": 20}, "profile2": {"frequency": 30}, "profile3": {}, } - ir = InventoryRecord.from_dict( - { + ir = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -56,7 +60,9 @@ def test_assignment_of_static_profiles(self): result = assign_profiles(ir, profiles, {}) self.assertEqual({20: ["profile1"], 30: ["profile2"]}, result) - def test_assignment_of_base_profiles(self): + def test_assignment_of_base_profiles(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + profiles = { "BaseUpTime": {"frequency": 60, "condition": {"type": "base"}}, "profile2": {"frequency": 30, "condition": {"type": "base"}}, @@ -65,7 +71,9 @@ def test_assignment_of_base_profiles(self): result = assign_profiles(ir_smart, profiles, {}) 
self.assertEqual({60: ["BaseUpTime"], 30: ["profile2"]}, result) - def test_assignment_of_field_profiles(self): + def test_assignment_of_field_profiles(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + profiles = { "BaseUpTime": { "frequency": 60, @@ -104,23 +112,33 @@ def test_assignment_of_field_profiles(self): result = assign_profiles(ir_smart, profiles, target) self.assertEqual({60: ["BaseUpTime", "MyProfile", "OtherProfile"]}, result) - def test_assignment_of_field_profiles_missing_state(self): + def test_assignment_of_field_profiles_missing_state(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + result = assign_profiles(ir_smart, simple_profiles, {}) self.assertEqual({}, result) - def test_assignment_of_field_profiles_db_missing_field_value(self): + def test_assignment_of_field_profiles_db_missing_field_value( + self, return_all_profiles + ): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + target = {"state": {"SNMPv2-MIB|sysDescr": {}}} result = assign_profiles(ir_smart, simple_profiles, target) self.assertEqual({}, result) - def test_assignment_of_field_not_matching_regex(self): + def test_assignment_of_field_not_matching_regex(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + target = {"state": {"SNMPv2-MIB|sysDescr": {"value": "WRONG"}}} result = assign_profiles(ir_smart, simple_profiles, target) self.assertEqual({}, result) - def test_assignment_of_static_and_smart_profiles(self): + def test_assignment_of_static_and_smart_profiles(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + profiles = { "profile1": {"frequency": 20}, "profile2": {"frequency": 30}, @@ -128,8 +146,8 @@ def test_assignment_of_static_and_smart_profiles(self): "profile5": {"frequency": 30, "condition": {"type": "base"}}, } - ir = InventoryRecord.from_dict( - { + ir = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -147,3 +165,87 @@ def test_assignment_of_static_and_smart_profiles(self): self.assertEqual( {60: ["BaseUpTime"], 30: ["profile5", "profile2"], 20: ["profile1"]}, result ) + + def test_assignment_of_walk_profile_as_a_static_profile(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + + profiles = { + "profile1": {"frequency": 20}, + "profile2": {"frequency": 30}, + "walk": {"frequency": 60, "condition": {"type": "walk"}}, + "profile5": {"frequency": 30, "condition": {"type": "base"}}, + } + + ir = InventoryRecord( + **{ + "address": "192.168.0.1", + "port": "34", + "version": "2c", + "community": "public", + "secret": "secret", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "profile1;profile2;walk", + "SmartProfiles": True, + "delete": False, + } + ) + + result = assign_profiles(ir, profiles, {}) + self.assertEqual({30: ["profile5", "profile2"], 20: ["profile1"]}, result) + + def test_assignment_of_walk_profile_as_a_static_profile_without_frequency( + self, return_all_profiles + ): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + + profiles = { + "profile1": {"frequency": 20}, + "profile2": {"frequency": 30}, + "walk": {"condition": {"type": "walk"}}, + "profile5": {"frequency": 30, "condition": {"type": "base"}}, + } + + ir = InventoryRecord( + **{ + "address": "192.168.0.1", + "port": "34", + "version": "2c", + "community": "public", + "secret": "secret", + 
"securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "profile1;profile2;walk", + "SmartProfiles": True, + "delete": False, + } + ) + + result = assign_profiles(ir, profiles, {}) + self.assertEqual({30: ["profile5", "profile2"], 20: ["profile1"]}, result) + + def test_smart_profiles_as_static_ones(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import assign_profiles + + profiles = { + "profile1": {"frequency": 20}, + "profile5": {"frequency": 30, "condition": {"type": "base"}}, + } + + ir = InventoryRecord( + **{ + "address": "192.168.0.1", + "port": "34", + "version": "2c", + "community": "public", + "secret": "secret", + "securityEngine": "ENGINE", + "walk_interval": 1850, + "profiles": "profile1;profile5", + "SmartProfiles": False, + "delete": False, + } + ) + + result = assign_profiles(ir, profiles, {}) + self.assertEqual({30: ["profile5"], 20: ["profile1"]}, result) diff --git a/test/inventory/test_inventory_setup_poller.py b/test/inventory/test_inventory_setup_poller.py index 673072eb8..e69a62788 100644 --- a/test/inventory/test_inventory_setup_poller.py +++ b/test/inventory/test_inventory_setup_poller.py @@ -1,15 +1,15 @@ from unittest import TestCase, mock from unittest.mock import Mock, patch +from celery.schedules import schedule + from splunk_connect_for_snmp.common.inventory_record import InventoryRecord -from splunk_connect_for_snmp.inventory.tasks import ( - generate_poll_task_definition, - inventory_setup_poller, -) class TestInventorySetupPoller(TestCase): - @mock.patch("splunk_connect_for_snmp.common.profiles.load_profiles") + @patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.find_one") @mock.patch("splunk_connect_for_snmp.inventory.tasks.assign_profiles") @@ -22,11 +22,13 @@ def test_inventory_setup_poller( m_task_manager, m_load_profiles, ): + from splunk_connect_for_snmp.inventory.tasks import inventory_setup_poller + periodic_obj_mock = Mock() + m_load_profiles.return_value = [] m_task_manager.return_value = periodic_obj_mock - - m_get_inventory.return_value = InventoryRecord.from_dict( - { + m_get_inventory.return_value = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -59,24 +61,36 @@ def test_inventory_setup_poller( # when inventory_setup_poller(work) - m_load_profiles.assert_not_called() - calls = periodic_obj_mock.manage_task.call_args_list + calls[0][1]["kwargs"]["profiles"] = set(calls[0][1]["kwargs"]["profiles"]) + calls[1][1]["kwargs"]["profiles"] = set(calls[1][1]["kwargs"]["profiles"]) + calls[2][1]["kwargs"]["profiles"] = set(calls[2][1]["kwargs"]["profiles"]) self.assertEqual( - {"address": "192.168.0.1", "profiles": {"BaseUpTime"}, "frequency": 60}, + { + "address": "192.168.0.1", + "profiles": {"BaseUpTime"}, + "priority": 2, + "frequency": 60, + }, calls[0][1]["kwargs"], ) self.assertEqual( { "address": "192.168.0.1", + "priority": 2, "profiles": {"profile2", "profile5"}, "frequency": 30, }, calls[1][1]["kwargs"], ) self.assertEqual( - {"address": "192.168.0.1", "profiles": {"profile1"}, "frequency": 20}, + { + "address": "192.168.0.1", + "profiles": {"profile1"}, + "priority": 2, + "frequency": 20, + }, calls[2][1]["kwargs"], ) @@ -88,9 +102,13 @@ def test_inventory_setup_poller( "sc4snmp;192.168.0.1;20;poll", ], ) - periodic_obj_mock.delete_disabled_poll_tasks.assert_called() - def 
test_generate_poll_task_definition(self): + @patch("splunk_connect_for_snmp.common.profiles.ProfilesManager") + def test_generate_poll_task_definition(self, rp): + from splunk_connect_for_snmp.inventory.tasks import ( + generate_poll_task_definition, + ) + active_schedules = [] address = "192.168.0.1" assigned_profiles = { @@ -99,11 +117,10 @@ def test_generate_poll_task_definition(self): 20: ["profile1"], } period = 30 - result = generate_poll_task_definition( active_schedules, address, assigned_profiles, period ) - + result["kwargs"]["profiles"] = set(result["kwargs"]["profiles"]) self.assertEqual("sc4snmp;192.168.0.1;30;poll", result["name"]) self.assertEqual("splunk_connect_for_snmp.snmp.tasks.poll", result["task"]) self.assertEqual("192.168.0.1", result["target"]) @@ -111,6 +128,7 @@ def test_generate_poll_task_definition(self): self.assertEqual( { "address": "192.168.0.1", + "priority": 2, "profiles": {"profile2", "profile5"}, "frequency": 30, }, @@ -128,7 +146,7 @@ def test_generate_poll_task_definition(self): "splunk_connect_for_snmp.splunk.tasks.send", result["options"]["link"].tasks[2].name, ) - self.assertEqual({"every": 30, "period": "seconds"}, result["interval"]) + self.assertEqual(schedule(30), result["schedule"]) self.assertEqual(True, result["enabled"]) self.assertEqual(False, result["run_immediately"]) diff --git a/test/inventory/test_loader.py b/test/inventory/test_loader.py index 577f48664..0fbdf7844 100644 --- a/test/inventory/test_loader.py +++ b/test/inventory/test_loader.py @@ -1,6 +1,7 @@ from unittest import TestCase, mock from unittest.mock import Mock, mock_open, patch +from celery.schedules import schedule from pymongo.results import UpdateResult from splunk_connect_for_snmp.common.inventory_record import InventoryRecord @@ -54,7 +55,7 @@ def test_walk_task(self): "delete": False, } - inventory_record = InventoryRecord.from_dict(inventory_record_json) + inventory_record = InventoryRecord(**inventory_record_json) result = gen_walk_task(inventory_record) self.assertEqual("sc4snmp;192.68.0.1:456;walk", result["name"]) @@ -81,7 +82,7 @@ def test_walk_task(self): "splunk_connect_for_snmp.splunk.tasks.send", result["options"]["link"].tasks[1].tasks[1].tasks[1].name, ) - self.assertEqual({"every": 3456, "period": "seconds"}, result["interval"]) + self.assertEqual(schedule(3456), result["schedule"]) self.assertTrue(result["enabled"]) self.assertTrue(result["run_immediately"]) @@ -99,7 +100,7 @@ def test_walk_task_for_port_161(self): "delete": False, } - inventory_record = InventoryRecord.from_dict(inventory_record_json) + inventory_record = InventoryRecord(**inventory_record_json) result = gen_walk_task(inventory_record) self.assertEqual("sc4snmp;192.68.0.1;walk", result["name"]) @@ -124,7 +125,7 @@ def test_walk_task_for_port_161(self): "splunk_connect_for_snmp.splunk.tasks.send", result["options"]["link"].tasks[1].tasks[1].tasks[1].name, ) - self.assertEqual({"every": 3456, "period": "seconds"}, result["interval"]) + self.assertEqual(schedule(3456), result["schedule"]) self.assertTrue(result["enabled"]) self.assertTrue(result["run_immediately"]) @@ -132,10 +133,16 @@ def test_walk_task_for_port_161(self): @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.update_one") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" 
+ ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_load_new_record_small_walk( self, m_load_profiles, + m_gather_profiles, m_migrate, m_mongo_collection, m_taskManager, @@ -177,10 +184,16 @@ def test_load_new_record_small_walk( @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.update_one") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_load_new_record( self, m_load_profiles, + m_update_profiles, m_migrate, m_mongo_collection, m_taskManager, @@ -203,10 +216,16 @@ def test_load_new_record( @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.update_one") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_load_modified_record( self, m_load_profiles, + m_update_profiles, m_migrate, m_mongo_collection, m_taskManager, @@ -228,9 +247,20 @@ def test_load_modified_record( @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.update_one") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_load_unchanged_record( - self, m_load_profiles, m_migrate, m_mongo_collection, m_taskManager, m_open + self, + m_load_profiles, + m_update_profiles, + m_migrate, + m_mongo_collection, + m_taskManager, + m_open, ): m_mongo_collection.return_value = UpdateResult( {"n": 1, "nModified": 0, "upserted": None}, True @@ -248,9 +278,20 @@ def test_load_unchanged_record( @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.update_one") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_ignoring_comment( - self, m_load_profiles, m_migrate, m_mongo_collection, m_taskManager, m_open + self, + m_load_profiles, + m_update_profiles, + m_migrate, + m_mongo_collection, + m_taskManager, + m_open, ): periodic_obj_mock = Mock() m_taskManager.return_value = periodic_obj_mock @@ -265,16 +306,28 @@ def test_ignoring_comment( @mock.patch("pymongo.collection.Collection.delete_one") @mock.patch("pymongo.collection.Collection.remove") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + 
"splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_deleting_record( - self, m_load_profiles, m_migrate, m_remove, m_delete, m_taskManager, m_open + self, + m_load_profiles, + m_update_profiles, + m_migrate, + m_remove, + m_delete, + m_taskManager, + m_open, ): periodic_obj_mock = Mock() m_taskManager.return_value = periodic_obj_mock m_load_profiles.return_value = default_profiles self.assertEqual(False, load()) - periodic_obj_mock.disable_tasks.assert_called_with("192.168.0.1") + periodic_obj_mock.delete_all_tasks_of_host.assert_called_with("192.168.0.1") m_delete.assert_called_with({"address": "192.168.0.1", "port": 161}) calls = m_remove.call_args_list @@ -292,16 +345,28 @@ def test_deleting_record( @mock.patch("pymongo.collection.Collection.delete_one") @mock.patch("pymongo.collection.Collection.remove") @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_deleting_record_non_default_port( - self, m_load_profiles, m_migrate, m_remove, m_delete, m_taskManager, m_open + self, + m_load_profiles, + m_update_profiles, + m_migrate, + m_remove, + m_delete, + m_taskManager, + m_open, ): periodic_obj_mock = Mock() m_taskManager.return_value = periodic_obj_mock m_load_profiles.return_value = default_profiles self.assertEqual(False, load()) - periodic_obj_mock.disable_tasks.assert_called_with("192.168.0.1:345") + periodic_obj_mock.delete_all_tasks_of_host.assert_called_with("192.168.0.1:345") m_delete.assert_called_with({"address": "192.168.0.1", "port": 345}) calls = m_remove.call_args_list @@ -317,10 +382,16 @@ def test_deleting_record_non_default_port( "splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager.manage_task" ) @patch("splunk_connect_for_snmp.inventory.loader.migrate_database") - @mock.patch("splunk_connect_for_snmp.inventory.loader.load_profiles") + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.update_all_profiles" + ) + @mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_inventory_errors( self, m_load_profiles, + m_update_profiles, m_migrate, m_manage_task, m_mongo_collection, diff --git a/test/inventory/test_record_validation.py b/test/inventory/test_record_validation.py index 5ee6bd966..8223fe9f5 100644 --- a/test/inventory/test_record_validation.py +++ b/test/inventory/test_record_validation.py @@ -1,10 +1,13 @@ -from unittest import TestCase - -from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid +from unittest import TestCase, mock +@mock.patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" +) class TestRecordValidation(TestCase): - def test_disabled_profile(self): + def test_disabled_profile(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse( is_smart_profile_valid( None, @@ -12,32 +15,44 @@ def test_disabled_profile(self): ) ) - def test_frequency_present(self): + def test_frequency_present(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + 
self.assertFalse(is_smart_profile_valid(None, {"condition": {"type": "base"}})) - def test_condition_present(self): + def test_condition_present(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse(is_smart_profile_valid(None, {"frequency": 300})) - def test_condition_type_present(self): + def test_condition_type_present(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse( is_smart_profile_valid(None, {"frequency": 300, "condition": "asdad"}) ) - def test_condition_type(self): + def test_condition_type(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse( is_smart_profile_valid( None, {"frequency": 300, "condition": {"type": "else"}} ) ) - def test_field_type(self): + def test_field_type(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse( is_smart_profile_valid( None, {"frequency": 300, "condition": {"type": "field"}} ) ) - def test_patterns_present(self): + def test_patterns_present(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse( is_smart_profile_valid( None, @@ -48,7 +63,9 @@ def test_patterns_present(self): ) ) - def test_patterns_is_list(self): + def test_patterns_is_list(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertFalse( is_smart_profile_valid( None, @@ -76,14 +93,18 @@ def test_patterns_is_list(self): ) ) - def test_base_profile_is_valid(self): + def test_base_profile_is_valid(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertTrue( is_smart_profile_valid( None, {"frequency": 300, "condition": {"type": "base"}} ) ) - def test_field_profile_is_valid(self): + def test_field_profile_is_valid(self, return_all_profiles): + from splunk_connect_for_snmp.inventory.tasks import is_smart_profile_valid + self.assertTrue( is_smart_profile_valid( None, diff --git a/test/other/test_custom_task_manager.py b/test/other/test_custom_task_manager.py index 5d01b4bc8..1845ffd0c 100644 --- a/test/other/test_custom_task_manager.py +++ b/test/other/test_custom_task_manager.py @@ -1,28 +1,37 @@ from unittest import TestCase -from unittest.mock import MagicMock, Mock, patch +from unittest.mock import ANY, MagicMock, Mock, patch + +from celery.schedules import schedule from splunk_connect_for_snmp.customtaskmanager import CustomPeriodicTaskManager +def raise_exception(): + raise KeyError + + class TestCustomTaskManager(TestCase): - @patch("celerybeatmongo.models.PeriodicTask.objects") + @patch("redbeat.schedulers.RedBeatSchedulerEntry.get_schedules_by_target") def test_delete_unused_poll_tasks(self, m_objects): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) doc1 = Mock() doc1.enabled = True task1 = Mock() + task1.delete = Mock() task1.task = "splunk_connect_for_snmp.snmp.tasks.poll" task1.name = "test1" doc2 = Mock() doc2.enabled = True task2 = Mock() + task2.delete = Mock() task2.task = "splunk_connect_for_snmp.snmp.tasks.poll" task2.name = "test2" doc3 = Mock() doc3.enabled = True task3 = Mock() + task3.delete = Mock() task3.task = "splunk_connect_for_snmp.snmp.tasks.walk" task3.name = "test3" @@ -34,322 +43,177 @@ def test_delete_unused_poll_tasks(self, m_objects): periodic_list = 
Mock() periodic_list.__iter__ = Mock(return_value=iter([task1, task2, task3, task4])) - periodic_list.get.side_effect = [doc1, doc2, doc3, doc4] m_objects.return_value = periodic_list task_manager.delete_unused_poll_tasks("192.168.0.1", ["name1", "name2"]) - - m_objects.assert_called_with(target="192.168.0.1") - calls = periodic_list.get.call_args_list - - self.assertEqual({"name": "test1"}, calls[0].kwargs) - self.assertEqual({"name": "test2"}, calls[1].kwargs) - self.assertEqual({"name": "name1"}, calls[2].kwargs) - - self.assertFalse(doc1.enabled) - self.assertFalse(doc2.enabled) - self.assertTrue(doc3.enabled) - self.assertTrue(doc4.enabled) - - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_delete_disabled_poll_tasks(self, m_objects): - task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - - doc1 = Mock() - task1 = Mock() - task1.name = "test1" - - doc2 = Mock() - task2 = Mock() - task2.name = "test2" - - doc3 = Mock() - task3 = Mock() - task3.name = "test3" - - doc4 = Mock() - task4 = Mock() - task4.name = "test4" - - periodic_list = Mock() - periodic_list.__iter__ = Mock(return_value=iter([task1, task2, task3, task4])) - periodic_list.get.side_effect = [doc1, doc2, doc3, doc4] - - m_objects.return_value = periodic_list - - task_manager.delete_disabled_poll_tasks() - - m_objects.assert_called_with(enabled=False) - calls = periodic_list.get.call_args_list - - self.assertEqual({"name": "test1"}, calls[0].kwargs) - self.assertEqual({"name": "test2"}, calls[1].kwargs) - self.assertEqual({"name": "test3"}, calls[2].kwargs) - self.assertEqual({"name": "test4"}, calls[3].kwargs) - - doc1.delete.assert_called() - doc2.delete.assert_called() - doc3.delete.assert_called() - doc4.delete.assert_called() - - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_enable_tasks(self, m_objects): + m_objects.assert_called_with("192.168.0.1", app=ANY) + self.assertTrue(task1.delete.called) + self.assertTrue(task2.delete.called) + self.assertFalse(task3.delete.called) + self.assertFalse(task4.delete.called) + + @patch("redbeat.schedulers.RedBeatSchedulerEntry.from_key") + def test_manage_existing_task(self, redbeat_scheduler_entry_from_key): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - doc1 = Mock() - doc1.enabled = False task1 = Mock() task1.name = "test1" + task1.schedule = schedule(120) + task1.save = Mock() - doc2 = Mock() - doc2.enabled = False - task2 = Mock() - task2.name = "test2" + redbeat_scheduler_entry_from_key.return_value = task1 - periodic_list = Mock() - periodic_list.__iter__ = Mock(return_value=iter([task1, task2])) - periodic_list.get.side_effect = [doc1, doc2] - m_objects.return_value = periodic_list - - task_manager.enable_tasks("192.168.0.1") - - m_objects.assert_called_with(target="192.168.0.1") - - calls = periodic_list.get.call_args_list - - self.assertEqual({"name": "test1"}, calls[0].kwargs) - self.assertEqual({"name": "test2"}, calls[1].kwargs) - - self.assertTrue(doc1.enabled) - self.assertTrue(doc2.enabled) - - doc1.save.assert_called() - doc2.save.assert_called() - - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_disable_tasks(self, m_objects): - task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - - doc1 = Mock() - doc1.enabled = True - task1 = Mock() - task1.name = "test1" - - doc2 = Mock() - doc2.enabled = True - task2 = Mock() - task2.name = "test2" - - periodic_list = Mock() - periodic_list.__iter__ = Mock(return_value=iter([task1, task2])) 
- periodic_list.get.side_effect = [doc1, doc2] - m_objects.return_value = periodic_list - - task_manager.disable_tasks("192.168.0.1") - - m_objects.assert_called_with(target="192.168.0.1") - - calls = periodic_list.get.call_args_list - - self.assertEqual({"name": "test1"}, calls[0].kwargs) - self.assertEqual({"name": "test2"}, calls[1].kwargs) - - self.assertFalse(doc1.enabled) - self.assertFalse(doc2.enabled) - - doc1.save.assert_called() - doc2.save.assert_called() - - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_manage_task_existing_interval(self, m_objects): - task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - - task1 = Mock() - task1.name = "test1" - doc1 = MagicMock() - - m_objects.return_value = task1 - task1.get.return_value = doc1 - - d = {"name": "test1"} - doc1.__getitem__.side_effect = d.__getitem__ - doc1.__setitem__.side_effect = d.__setitem__ - doc1.__contains__.side_effect = d.__contains__ - - task_data = {"name": "test1", "interval": {"every": 60, "period": "seconds"}} + task_data = { + "task": "task1", + "name": "test1", + "args": {"arg1": "val1", "arg2": "val2"}, + "kwargs": {"karg1": "val1", "karg2": "val2"}, + "schedule": schedule(60), + "target": "some_target", + "options": "some+option", + "enabled": True, + "run_immediately": False, + } task_manager.manage_task(**task_data) - m_objects.assert_called_with(name="test1") - task1.get.assert_called_with(name="test1") - - self.assertEqual({"name": "test1"}, d) - - self.assertEqual(60, doc1.interval.every) - self.assertEqual("seconds", doc1.interval.period) - doc1.save.assert_called_with() + redbeat_scheduler_entry_from_key.assert_called_with("redbeat:test1", app=ANY) + self.assertEqual(task1.schedule, schedule(60)) + self.assertTrue(task1.save.called) - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_manage_task_existing_crontab(self, m_objects): + @patch("redbeat.schedulers.RedBeatSchedulerEntry.from_key") + @patch("redbeat.schedulers.RedBeatSchedulerEntry.__new__") + def test_manage_new_task(self, redbeat_scheduler, redbeat_scheduler_entry_from_key): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - task1 = Mock() - task1.name = "test1" - doc1 = MagicMock() - - m_objects.return_value = task1 - task1.get.return_value = doc1 + redbeat_scheduler_entry_from_key.side_effect = KeyError - d = {"name": "test1"} - doc1.__getitem__.side_effect = d.__getitem__ - doc1.__setitem__.side_effect = d.__setitem__ - doc1.__contains__.side_effect = d.__contains__ - - task_data = {"name": "test1", "crontab": {"minute": 30, "hour": 10}} + task1 = Mock() + task_data = { + "task": "task1", + "name": "test1", + "args": {"arg1": "val1", "arg2": "val2"}, + "kwargs": {"karg1": "val1", "karg2": "val2"}, + "schedule": schedule(60), + "target": "some_target", + "options": "some+option", + "enabled": True, + "run_immediately": False, + } + redbeat_scheduler.return_value = task1 task_manager.manage_task(**task_data) - m_objects.assert_called_with(name="test1") - task1.get.assert_called_with(name="test1") + redbeat_scheduler_entry_from_key.assert_called_with("redbeat:test1", app=ANY) - self.assertEqual({"name": "test1"}, d) - - self.assertEqual("30 10 * * * (m/h/d/dM/MY)", str(doc1.crontab)) - doc1.save.assert_called_with() - - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_manage_task_existing_target(self, m_objects): + @patch("redbeat.schedulers.RedBeatSchedulerEntry.from_key") + def test_manage_task_existing_target(self, 
redbeat_scheduler_entry_from_key): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) task1 = Mock() task1.name = "test1" - doc1 = MagicMock() + task1.target = "some_target" + task1.save = Mock() - m_objects.return_value = task1 - task1.get.return_value = doc1 + redbeat_scheduler_entry_from_key.return_value = task1 - d = {"name": "test1"} - doc1.__getitem__.side_effect = d.__getitem__ - doc1.__setitem__.side_effect = d.__setitem__ - doc1.__contains__.side_effect = d.__contains__ - - task_data = {"name": "test1", "target": "192.168.0.1"} + task_data = { + "task": "task1", + "name": "test1", + "args": {"arg1": "val1", "arg2": "val2"}, + "kwargs": {"karg1": "val1", "karg2": "val2"}, + "schedule": schedule(60), + "target": "some_other_target", + "options": "some+option", + "enabled": True, + "run_immediately": False, + } task_manager.manage_task(**task_data) - self.assertEqual({"name": "test1"}, d) - - m_objects.assert_called_with(name="test1") - task1.get.assert_called_with(name="test1") - - doc1.save.assert_not_called() + redbeat_scheduler_entry_from_key.assert_called_with("redbeat:test1", app=ANY) + self.assertEqual(task1.target, "some_other_target") + self.assertTrue(task1.save.called) - @patch("celerybeatmongo.models.PeriodicTask.objects") - def test_manage_task_existing_only_props(self, m_objects): + @patch("redbeat.schedulers.RedBeatSchedulerEntry.from_key") + @patch("redbeat.schedulers.RedBeatSchedulerEntry.__new__") + def test_manage_task_existing_only_props( + self, redbeat_scheduler, redbeat_scheduler_entry_from_key + ): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) task1 = Mock() task1.name = "test1" - doc1 = MagicMock() - - m_objects.return_value = task1 - task1.get.return_value = doc1 - - d = {"name": "test1"} - doc1.__getitem__.side_effect = d.__getitem__ - doc1.__setitem__.side_effect = d.__setitem__ - doc1.__contains__.side_effect = d.__contains__ - - task_data = {"name": "test1", "prop1": "value1", "prop2": "value2"} - - task_manager.manage_task(**task_data) + task1.task = ("task1",) + task1.args = ({"arg1": "val1", "arg2": "val2"},) + task1.kwargs = ({"karg1": "val1", "karg2": "val2"},) + task1.schedule = (schedule(60),) + task1.target = ("some_other_target",) + task1.options = ("some+option",) + task1.enabled = (True,) + task1.run_immediately = (False,) + + redbeat_scheduler_entry_from_key.return_value = task1 + new_args = {"arg1": "new_arg_value"} + new_kwargs = {"karg1": "new_karg_value"} + new_task_data = { + "task": "task1", + "name": "test1", + "args": new_args, + "kwargs": new_kwargs, + "schedule": schedule(60), + "target": "some_other_target", + "options": "some+option", + "enabled": True, + "run_immediately": False, + } - self.assertEqual({"name": "test1", "prop1": "value1", "prop2": "value2"}, d) + task_manager.manage_task(**new_task_data) - m_objects.assert_called_with(name="test1") - task1.get.assert_called_with(name="test1") + self.assertEqual(task1.args, new_args) + self.assertEqual(task1.kwargs, new_kwargs) - doc1.save.assert_called() + redbeat_scheduler.assert_not_called() + task1.save.assert_called() - @patch("celerybeatmongo.models.PeriodicTask.objects") + @patch("redbeat.schedulers.RedBeatSchedulerEntry.get_schedules") def test_delete_all_poll_tasks(self, m_objects): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - doc1 = Mock() task1 = Mock() task1.task = "splunk_connect_for_snmp.snmp.tasks.poll" task1.name = "test1" - doc2 = Mock() task2 = Mock() task2.task = 
"splunk_connect_for_snmp.snmp.tasks.poll" task2.name = "test2" - doc3 = Mock() task3 = Mock() task3.task = "splunk_connect_for_snmp.snmp.tasks.walk" task3.name = "test3" - periodic_list = Mock() - periodic_list.__iter__ = Mock(return_value=iter([task1, task2, task3])) - periodic_list.get.side_effect = [doc1, doc2, doc3] + periodic_list = [task1, task2, task3] m_objects.return_value = periodic_list task_manager.delete_all_poll_tasks() - calls = periodic_list.get.call_args_list - - self.assertEqual({"name": "test1"}, calls[0].kwargs) - self.assertEqual({"name": "test2"}, calls[1].kwargs) + task1.delete.assert_called() + task2.delete.assert_called() + task3.delete.assert_not_called() - doc1.delete.assert_called() - doc2.delete.assert_called() - doc3.delete.assert_not_called() - - @patch("celerybeatmongo.models.PeriodicTask.objects") + @patch("redbeat.schedulers.RedBeatSchedulerEntry.get_schedules") def test_rerun_all_walks(self, m_objects): task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - doc1 = Mock() - doc1.run_immediately = False task1 = Mock() + task1.run_immediately = False task1.task = "splunk_connect_for_snmp.snmp.tasks.walk" task1.name = "test1" - periodic_list = Mock() - periodic_list.__iter__ = Mock(return_value=iter([task1])) - periodic_list.get.side_effect = [doc1] - - m_objects.return_value = periodic_list + m_objects.return_value = [task1] task_manager.rerun_all_walks() - calls = periodic_list.get.call_args_list - - self.assertEqual({"name": "test1"}, calls[0].kwargs) - - self.assertTrue(doc1.run_immediately) - doc1.save.assert_called() - - @patch("celerybeatmongo.models.PeriodicTask.objects") - @patch("celerybeatmongo.models.PeriodicTask.save") - def test_manage_task_new(self, m_save, m_objects): - m_objects.return_value = None - - task_manager = CustomPeriodicTaskManager.__new__(CustomPeriodicTaskManager) - m_objects.return_value = None - task_data = { - "task": "task1", - "name": "test1", - "args": {"arg1": "val1", "arg2": "val2"}, - "kwargs": {"karg1": "val1", "karg2": "val2"}, - "interval": {"every": 60, "period": "seconds"}, - "target": "some_target", - "options": "some+option", - "enabled": True, - } - - task_manager.manage_task(**task_data) - - m_save.assert_called() + task1.set_run_immediately.assert_called_with(True) + task1.save.assert_called() diff --git a/test/snmp/test_auth.py b/test/snmp/test_auth.py index afc66334f..aaef20ae8 100644 --- a/test/snmp/test_auth.py +++ b/test/snmp/test_auth.py @@ -21,8 +21,8 @@ mock_value = """some value""" -ir = InventoryRecord.from_dict( - { +ir = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -65,8 +65,8 @@ def test_get_secret_value_default(self, m_exists): @patch("splunk_connect_for_snmp.snmp.auth.getCmd") @patch("splunk_connect_for_snmp.snmp.auth.fetch_security_engine_id") def test_get_security_engine_id_not_present(self, m_fetch, m_get_cmd): - ir2 = InventoryRecord.from_dict( - { + ir2 = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -101,8 +101,8 @@ def test_get_security_engine_id_not_present(self, m_fetch, m_get_cmd): @patch("splunk_connect_for_snmp.snmp.auth.getCmd") @patch("splunk_connect_for_snmp.snmp.auth.fetch_security_engine_id") def test_get_security_engine_id(self, m_fetch, m_get_cmd): - ir2 = InventoryRecord.from_dict( - { + ir2 = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -193,8 +193,8 @@ def test_getAuthV3_security_engine_not_str( logger = Mock() snmpEngine = Mock() - ir2 = 
InventoryRecord.from_dict( - { + ir2 = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", diff --git a/test/snmp/test_do_work.py b/test/snmp/test_do_work.py index 91ad53d82..040a36fc0 100644 --- a/test/snmp/test_do_work.py +++ b/test/snmp/test_do_work.py @@ -5,8 +5,8 @@ from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError from splunk_connect_for_snmp.snmp.manager import Poller -inventory_record = InventoryRecord.from_dict( - { +inventory_record = InventoryRecord( + **{ "address": "192.168.0.1", "port": "34", "version": "2c", @@ -33,6 +33,7 @@ def test_do_work_no_work_to_do(self): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None + poller.profiles_manager = MagicMock() varbinds_bulk, varbinds_get = set(), set() get_mapping, bulk_mapping = {}, {} @@ -56,17 +57,18 @@ def test_do_work_no_work_to_do(self): @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock()) @patch("splunk_connect_for_snmp.snmp.manager.bulkCmd") @patch("splunk_connect_for_snmp.snmp.manager.getCmd") - @patch("splunk_connect_for_snmp.snmp.manager.load_profiles") + @patch("splunk_connect_for_snmp.common.profiles.ProfilesManager") def test_do_work_bulk(self, load_profiles, getCmd, bulkCmd): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.builder = MagicMock() + poller.profiles_manager = MagicMock() m_process_data = MagicMock() m_process_data.return_value = (False, [], {}) poller.process_snmp_data = m_process_data requested_profiles = ["profile1", "profile2"] - load_profiles.return_value = { + poller.profiles_manager.return_all_profiles.return_value = { "profile1": { "frequency": 20, "varBinds": [["IF-MIB", "ifDescr"], ["IF-MIB", "ifSpeed"]], @@ -88,15 +90,18 @@ def test_do_work_bulk(self, load_profiles, getCmd, bulkCmd): @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock()) @patch("splunk_connect_for_snmp.snmp.manager.bulkCmd") @patch("splunk_connect_for_snmp.snmp.manager.getCmd") - @patch("splunk_connect_for_snmp.snmp.manager.load_profiles") + @patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_do_work_get(self, load_profiles, getCmd, bulkCmd): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.builder = MagicMock() poller.process_snmp_data = MagicMock() + poller.profiles_manager = MagicMock() requested_profiles = ["profile1", "profile2"] - load_profiles.return_value = { + poller.profiles_manager.return_all_profiles.return_value = { "profile1": { "frequency": 20, "varBinds": [["IF-MIB", "ifDescr", 1], ["IF-MIB", "ifSpeed", 2]], @@ -125,15 +130,18 @@ def test_do_work_get(self, load_profiles, getCmd, bulkCmd): @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock()) @patch("splunk_connect_for_snmp.snmp.manager.bulkCmd") @patch("splunk_connect_for_snmp.snmp.manager.getCmd") - @patch("splunk_connect_for_snmp.snmp.manager.load_profiles") + @patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_do_work_errors(self, load_profiles, getCmd, bulkCmd): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.builder = MagicMock() poller.process_snmp_data = MagicMock() + poller.profiles_manager = MagicMock() requested_profiles = ["profile1"] - load_profiles.return_value = { + poller.profiles_manager.return_all_profiles.return_value = { "profile1": 
{"frequency": 20, "varBinds": [["IF-MIB", "ifDescr", 1]]} } getCmd.return_value = [(True, True, 2, [])] diff --git a/test/snmp/test_tasks.py b/test/snmp/test_tasks.py index da2c30357..9e9ad0eb7 100644 --- a/test/snmp/test_tasks.py +++ b/test/snmp/test_tasks.py @@ -163,7 +163,10 @@ def test_trap_retry_translation_failed( m_is_mib_known, m_process_data, m_resolved, - *mongo_args + m_release, + m_lock, + m_mongo_lock, + m_mongo_client, ): from splunk_connect_for_snmp.snmp.tasks import trap @@ -177,6 +180,7 @@ def test_trap_retry_translation_failed( self_obj = MagicMock() self_obj.trap = trap self_obj.mib_view_controller = MagicMock() + self_obj.trap.already_loaded_mibs = set() result = self_obj.trap(work) calls = m_load_mib.call_args_list diff --git a/test/snmp/test_utils.py b/test/snmp/test_utils.py index 9a3acd225..183cd612f 100644 --- a/test/snmp/test_utils.py +++ b/test/snmp/test_utils.py @@ -42,10 +42,10 @@ def test_get_inventory(self): self.assertEqual("2c", ir.version) self.assertEqual("public", ir.community) self.assertEqual("some_secret", ir.secret) - self.assertEqual("some_engine", ir.securityEngine) + self.assertEqual("some_engine", ir.security_engine) self.assertEqual(1820, ir.walk_interval) self.assertEqual(["profile1", "profile2"], ir.profiles) - self.assertEqual(True, ir.SmartProfiles) + self.assertEqual(True, ir.smart_profiles) self.assertEqual(False, ir.delete) def test_any_failure_happened_error_indication(self): diff --git a/test/test_walk.py b/test/test_walk.py index 172ef56ce..9ac6a9893 100644 --- a/test/test_walk.py +++ b/test/test_walk.py @@ -13,7 +13,9 @@ class TestWalk(TestCase): @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") - @patch("splunk_connect_for_snmp.snmp.manager.load_profiles") + @patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_run_walk(self, m_load_profiles, m_do_work, m_init, m_open): m_init.return_value = None m_do_work.return_value = (False, {}) @@ -33,7 +35,9 @@ def test_run_walk(self, m_load_profiles, m_do_work, m_init, m_open): @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") - @patch("splunk_connect_for_snmp.snmp.manager.load_profiles") + @patch( + "splunk_connect_for_snmp.common.profiles.ProfilesManager.return_all_profiles" + ) def test_run_walk_exception(self, m_load_profiles, m_do_work, m_init, m_open): m_init.return_value = None m_do_work.side_effect = (Exception("Boom!"), (False, {}))