diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index 47075fb7e..a56966b29 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -159,7 +159,7 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_INT_TESTS }} working-directory: ./integration_tests/scripts - name: Run integration tests - run: sleep 30 && timeout 50m ansible-playbook -v playbook.yml || true + run: sleep 30 && timeout 55m ansible-playbook -v playbook.yml || true working-directory: ./integration_tests/scripts - name: Download and analyze logs run: | @@ -178,64 +178,4 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_INT_TESTS }} working-directory: ./integration_tests/scripts - release: - name: Release - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - submodules: false - persist-credentials: false - - #Build docker images - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - name: Login to GitHub Packages Docker Registry - uses: docker/login-action@v1.9.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Docker meta - id: docker_meta - uses: docker/metadata-action@v3 - with: - images: ghcr.io/splunk/splunk-connect-for-snmp/container - tags: | - type=semver,pattern=v{{major}}.{{minor}} - type=semver,pattern=v{{major}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=semver,pattern={{version}} - type=ref,event=branch - type=ref,event=pr - type=sha - type=sha,format=long - - name: Build and push action - id: docker_action_build - uses: docker/build-push-action@v2 - with: - context: . - push: true - platforms: linux/amd64,linux/arm64 - tags: ${{ steps.docker_meta.outputs.tags }} - labels: ${{ steps.docker_meta.outputs.labels }} - cache-to: type=inline - - uses: actions/setup-node@v2 - with: - node-version: "14" - - name: Semantic Release - id: version - uses: cycjimmy/semantic-release-action@v2.6.0 - with: - semantic_version: 17 - extra_plugins: | - @semantic-release/exec - @semantic-release/git - @google/semantic-release-replace-plugin - env: - GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }} diff --git a/.github/workflows/ci-release-pr.yaml b/.github/workflows/ci-release-pr.yaml new file mode 100644 index 000000000..cb5baaba1 --- /dev/null +++ b/.github/workflows/ci-release-pr.yaml @@ -0,0 +1,73 @@ +# ######################################################################## +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ######################################################################## + +name: ci-release-pr +on: + pull_request: + branches: + - "main" + - "develop" + - "next" + tags-ignore: + - "v*" +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: false + persist-credentials: false + + #Build docker images + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to GitHub Packages Docker Registry + uses: docker/login-action@v1.9.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Docker meta + id: docker_meta + uses: docker/metadata-action@v3 + with: + images: ghcr.io/splunk/splunk-connect-for-snmp/container + tags: | + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern=v{{major}} + type=semver,pattern=v{{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=semver,pattern={{version}} + type=ref,event=branch + - name: Build and push action + id: docker_action_build + uses: docker/build-push-action@v2 + with: + context: . + push: false + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.docker_meta.outputs.tags }} + labels: ${{ steps.docker_meta.outputs.labels }} + cache-to: type=inline + - uses: actions/setup-node@v2 + with: + node-version: "14" + diff --git a/.github/workflows/ci-release.yaml b/.github/workflows/ci-release.yaml new file mode 100644 index 000000000..7ca5585fa --- /dev/null +++ b/.github/workflows/ci-release.yaml @@ -0,0 +1,86 @@ +# ######################################################################## +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ######################################################################## + +name: ci-release +on: + push: + branches: + - "main" + - "develop" + - "next" + tags-ignore: + - "v*" + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: false + persist-credentials: false + + #Build docker images + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to GitHub Packages Docker Registry + uses: docker/login-action@v1.9.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Docker meta + id: docker_meta + uses: docker/metadata-action@v3 + with: + images: ghcr.io/splunk/splunk-connect-for-snmp/container + tags: | + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern=v{{major}} + type=semver,pattern=v{{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=semver,pattern={{version}} + type=ref,event=branch + type=ref,event=pr + - name: Build and push action + id: docker_action_build + uses: docker/build-push-action@v2 + with: + context: . 
+      push: true
+      platforms: linux/amd64,linux/arm64
+      tags: ${{ steps.docker_meta.outputs.tags }}
+      labels: ${{ steps.docker_meta.outputs.labels }}
+      cache-to: type=inline
+    - uses: actions/setup-node@v2
+      with:
+        node-version: "14"
+    - name: Semantic Release
+      id: version
+      uses: cycjimmy/semantic-release-action@v2.6.0
+      with:
+        semantic_version: 17
+        extra_plugins: |
+          @semantic-release/exec
+          @semantic-release/git
+          @google/semantic-release-replace-plugin
+      env:
+        GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }}
+
diff --git a/README.md b/README.md
index f00eb0175..4bef3fe75 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,10 @@ Splunk Connect for SNMP Gets SNMP data in to Splunk Enterprise and Splunk Cloud
 # Documentation
 For deployment and user documentation [see](https://splunk.github.io/splunk-connect-for-snmp/)
 
+There are several versions of the documentation you can browse:
+1. `main` refers to the latest version
+2. `develop` refers to the latest `beta` version
+3. tagged versions, e.g. `1.7.1`, refer to specific releases
 # Contact
 Feel free to contact us via [#splunk-connect-for-snmp](https://splunk-usergroups.slack.com/archives/C01K4V86WV7) slack channel.
 
diff --git a/charts/splunk-connect-for-snmp/Chart.lock b/charts/splunk-connect-for-snmp/Chart.lock
index 299eb6541..6d95914db 100644
--- a/charts/splunk-connect-for-snmp/Chart.lock
+++ b/charts/splunk-connect-for-snmp/Chart.lock
@@ -7,6 +7,6 @@ dependencies:
     version: 16.8.10
   - name: mibserver
     repository: https://pysnmp.github.io/mibs/charts/
-    version: 1.14.8
-digest: sha256:5a3ee28eee8cfe1a1dfc271151cefb16e7229600d5dcd5ebe12e52b1982fa5f5
-generated: "2022-09-19T10:31:22.032368+02:00"
+    version: 1.14.9
+digest: sha256:0547c9ace910e4d9a6632f714a37763d378372336b0106ec003028d023ce6b31
+generated: "2022-09-30T13:31:07.322275+02:00"
diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml
index 7bd0fe4a2..151a87b10 100644
--- a/charts/splunk-connect-for-snmp/Chart.yaml
+++ b/charts/splunk-connect-for-snmp/Chart.yaml
@@ -14,12 +14,12 @@ type: application
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
 
-version: 1.8.1
+version: 1.8.2-beta.7
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "1.8.1" +appVersion: "1.8.2-beta.7" # dependencies: - name: mongodb diff --git a/charts/splunk-connect-for-snmp/templates/common/sim-secret.yaml b/charts/splunk-connect-for-snmp/templates/common/sim-secret.yaml index 4f235c67c..7ba8c7d82 100644 --- a/charts/splunk-connect-for-snmp/templates/common/sim-secret.yaml +++ b/charts/splunk-connect-for-snmp/templates/common/sim-secret.yaml @@ -1,4 +1,4 @@ -{{- if .Values.sim.enabled }} +{{- if and .Values.sim.secret.create .Values.sim.enabled }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl index efca2c398..4674d398c 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/sim/_helpers.tpl @@ -60,3 +60,15 @@ Create the name of the service account to use {{- default "default" .Values.sim.serviceAccount.name }} {{- end }} {{- end }} + +{{/* +Define name for the Splunk Secret +*/}} +{{- define "splunk-connect-for-snmp.sim.secret" -}} +{{- if .Values.sim.secret.name -}} +{{- printf "%s" .Values.sim.secret.name -}} +{{- else -}} +{{ include "splunk-connect-for-snmp.name" . }}-sim +{{- end -}} +{{- end -}} + diff --git a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml index 6e8cc0f2b..d975e0c5a 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml @@ -14,10 +14,11 @@ spec: {{- include "splunk-connect-for-snmp.sim.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.sim.podAnnotations }} annotations: + {{- with .Values.sim.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/common/sim-secret.yaml") . | sha256sum }} labels: {{- include "splunk-connect-for-snmp.sim.selectorLabels" . | nindent 8 }} spec: @@ -35,18 +36,16 @@ spec: image: {{ .Values.sim.image | default "quay.io/signalfx/splunk-otel-collector" }}:{{ .Values.sim.tag | default "0.41.0" }} imagePullPolicy: {{ .Values.sim.pullPolicy | default "IfNotPresent" }} args: ["--config=/config/otel-collector-config.yaml"] - securityContext: - allowPrivilegeEscalation: false env: - name: signalfxToken valueFrom: secretKeyRef: - name: {{ include "splunk-connect-for-snmp.name" . }}-sim + name: {{ include "splunk-connect-for-snmp.sim.secret" . }} key: signalfxToken - name: signalfxRealm valueFrom: secretKeyRef: - name: {{ include "splunk-connect-for-snmp.name" . }}-sim + name: {{ include "splunk-connect-for-snmp.sim.secret" . }} key: signalfxRealm ports: - containerPort: 8882 diff --git a/charts/splunk-connect-for-snmp/templates/sim/service.yaml b/charts/splunk-connect-for-snmp/templates/sim/service.yaml index f40999c9f..ac4383fc6 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/service.yaml +++ b/charts/splunk-connect-for-snmp/templates/sim/service.yaml @@ -7,6 +7,12 @@ metadata: {{- include "splunk-connect-for-snmp.sim.labels" . 
| nindent 4 }}
  annotations:
    metallb.universe.tf/allow-shared-ip: "true"
+    {{- if .Values.sim.service.annotations }}
+{{ toYaml .Values.sim.service.annotations | indent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+{{ toYaml .Values.commonAnnotations | indent 4 }}
+    {{- end }}
 spec:
   type: ClusterIP
   ports:
diff --git a/charts/splunk-connect-for-snmp/templates/traps/service.yaml b/charts/splunk-connect-for-snmp/templates/traps/service.yaml
index 76df2723e..e16653e85 100644
--- a/charts/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/charts/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -8,6 +8,12 @@ metadata:
     {{- if .Values.traps.service.usemetallb }}
     metallb.universe.tf/allow-shared-ip: {{ .Values.traps.service.metallbsharingkey | default "splunk-connect" | quote }}
     {{- end }}
+    {{- if .Values.traps.service.annotations }}
+{{ toYaml .Values.traps.service.annotations | indent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+{{ toYaml .Values.commonAnnotations | indent 4 }}
+    {{- end }}
 spec:
   type: {{ .Values.traps.service.type }}
diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml
index 8d6071c80..44ba82bc3 100644
--- a/charts/splunk-connect-for-snmp/values.yaml
+++ b/charts/splunk-connect-for-snmp/values.yaml
@@ -204,6 +204,7 @@ sim:
   ingress:
     enabled: false
   securityContext:
+    allowPrivilegeEscalation: false
     capabilities:
       drop:
         - ALL
@@ -231,6 +232,17 @@ sim:
     # The name of the service account to use.
     # If not set and create is true, a name is generated using the fullname template
     name: ""
+
+  service:
+    annotations: {}
+
+  secret:
+    # Option for creating a new secret or using an existing one.
+    # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the
+    # values from sim.signalfxToken and sim.signalfxRealm.
+    # When secret.create=false, the user must set secret.name to the name of a k8s secret the user created.
+    create: true
+    name: ""
 traps:
   replicaCount: 2
   usernameSecrets: []
@@ -265,6 +277,7 @@ traps:
     runAsGroup: 10001
 
   service:
+    annotations: {}
     usemetallb: true
     metallbsharingkey: "splunk-connect"
     type: LoadBalancer
@@ -372,4 +385,5 @@ mongodb:
 redis:
   architecture: standalone
   auth:
-    enabled: false
\ No newline at end of file
+    enabled: false
+commonAnnotations: {}
\ No newline at end of file
diff --git a/create_packages.sh b/create_packages.sh
index 149236c90..3df1d4176 100755
--- a/create_packages.sh
+++ b/create_packages.sh
@@ -231,5 +231,9 @@ unzip otel-repo.zip
 rm otel-repo.zip
 OTEL_DIR=$(ls | grep -E "signalfx-splunk.+")
 CHART_DIT="$OTEL_DIR/helm-charts/splunk-otel-collector"
+OTEL_IMAGE_TAG=$(grep appVersion "$CHART_DIT/Chart.yaml" | sed 's/.*: //g')
+otel_image=quay.io/signalfx/splunk-otel-collector:"$OTEL_IMAGE_TAG"
+docker pull "$otel_image"
+docker save "$otel_image" > /tmp/package/packages/otel_image.tar
 helm package $CHART_DIT -d /tmp/package/packages/
 rm -rf $OTEL_DIR
diff --git a/docs/bestpractices.md b/docs/bestpractices.md
index d64e5e631..2dfe58cbf 100644
--- a/docs/bestpractices.md
+++ b/docs/bestpractices.md
@@ -2,10 +2,10 @@
 
 ## Pieces of Advice
 
-### Check when SNMP WALK was executed last time for device
+### Check when SNMP WALK was executed last time for the device
 1. [Configure Splunk OpenTelemetry Collector for Kubernetes](gettingstarted/sck-installation.md)
 2. Go to your Splunk and execute search: `index="em_logs" "Sending due task" "sc4snmp;;walk"`
-and replace by IP Address which you are interested.
+and replace with the pertinent IP Address. ### Uninstall Splunk Connect for SNMP To uninstall SC4SNMP run the following commands: @@ -17,12 +17,12 @@ To uninstall SC4SNMP run the following commands: ### Installing Splunk Connect for SNMP on Linux RedHat Installation of RedHat may be blocking ports required by microk8s. Installing microk8s on RedHat -required checking if the firewall is not blocking any of [required microk8s ports](https://microk8s.io/docs/ports). +requires checking to see if the firewall is not blocking any of [required microk8s ports](https://microk8s.io/docs/ports). ## Issues ### "Empty SNMP response message" problem -In case you see the following line in worker's logs: +In case you see the following line in the worker's logs: ```log [2022-01-04 11:44:22,553: INFO/ForkPoolWorker-1] Task splunk_connect_for_snmp.snmp.tasks.walk[8e62fc62-569c-473f-a765-ff92577774e5] retry: Retry in 3489s: SnmpActionError('An error of SNMP isWalk=True for a host 192.168.10.20 occurred: Empty SNMP response message') @@ -54,7 +54,7 @@ worker: ``` If you put only IP address (ex. `127.0.0.1`), then errors will be ignored for all of its devices (like `127.0.0.1:161`, -`127.0.0.1:163`...). If you put IP address and host structured as `{host}:{port}` that means the error will be ignored only for this device. +`127.0.0.1:163`...). If you put IP address and host structured as `{host}:{port}`, that means the error will be ignored only for this device. ### Walking a device takes too much time @@ -63,26 +63,26 @@ Enable small walk functionality with the following instruction: [Configure small ### An error of SNMP isWalk=True blocks traffic on SC4SNMP instance If you see many `An error of SNMP isWalk=True` errors in logs, that means that there is a connection problem with the hosts you're polling from. -Walk will try to retry multiple times, what will eventually cause a worker to be blocked for the retries time. In this case, you might want to limit -the maximum retries time. You can do it by setting the variable `worker.walkRetryMaxInterval`, like: +Walk will try to retry multiple times, which will eventually cause a worker to be blocked for the retries time. In this case, you might want to limit +the maximum retries time. You can do this by setting the variable `worker.walkRetryMaxInterval`, for example: ```yaml worker: walkRetryMaxInterval: 60 ``` -With the configuration from the above, walk will retry exponentially until it reaches 60 seconds. +With the configuration from the above, 'walk' will retry exponentially until it reaches 60 seconds. ### SNMP Rollover -The Rollover problem is that the integer value that they store (especially when they are 32-bit) is finite, -and when it’s reaching the maximum, it gets rolled down to 0 again which causes a strange drop in Analytics data. -The most common case of this issue is interface speed on a high speed ports. As a solution to this problem, SNMPv2 SMI defined a new object type, counter64, for 64-bit counters ([read more about it](https://www.cisco.com/c/en/us/support/docs/ip/simple-network-management-protocol-snmp/26007-faq-snmpcounter.html)). -Not all the devices support it, but if they are - remember to always poll counter64 type OID instead of counter32 one. +The Rollover problem is due to the integer value stored (especially when the value is 32-bit) being finite. +When it reaches its maximum, it gets rolled down to 0 again. This causes a strange drop in Analytics data. +The most common case of this issue is interface speed on high speed ports. 
As a solution to this problem, SNMPv2 SMI defined a new object type, counter64, for 64-bit counters ([read more about it](https://www.cisco.com/c/en/us/support/docs/ip/simple-network-management-protocol-snmp/26007-faq-snmpcounter.html)).
+Not all the devices support it, but if they do, poll the counter64 type OID instead of the counter32 one. For example, use `ifHCInOctets` instead of `ifInOctets`.
 
-If 64-bit counter are not supported on your device, you can write own splunk queries that calculate the shift based on
-maximum integer value + current state. The same works for values big enough that they're not fitting 64-bit value.
-An example for a SPLUNK query like that (inteface counter), would be:
+If 64-bit counters are not supported on your device, you can write your own Splunk queries that calculate the shift based on
+maximum integer value + current state. The same works for values big enough that they don't fit in a 64-bit value.
+An example of such a Splunk query (interface counter) would be:
 
 ```
 | streamstats current=f last(ifInOctets) as p_ifInOctets last(ifOutOctets) as p_ifOutOctets by ifAlias
@@ -91,4 +91,4 @@ An example for a SPLUNK query like that (inteface counter), would be:
 | eval max=pow(2,64)
 | eval out = if(out_delta<0,((max+out_delta)*8/(5*60*1000*1000*1000)),(out_delta)*8/(5*60*1000*1000*1000))
 | timechart span=5m avg(in) AS in, avg(out) AS out by ifAlias
-```
\ No newline at end of file
+```
diff --git a/docs/configuration/configuring-groups.md b/docs/configuration/configuring-groups.md
index 948c9d2df..dae8c12f2 100644
--- a/docs/configuration/configuring-groups.md
+++ b/docs/configuration/configuring-groups.md
@@ -1,16 +1,16 @@
 # Configuring Groups
 
 It is common to configure whole groups of devices instead of just single ones.
-SC4SNMP allows both types of configuration. Group consists of many hosts. Each of them is configured in `values.yaml`
-file in the `scheduler` section. After configuring a group, its name can be used in the `address`
+SC4SNMP allows both types of configuration. A group consists of many hosts. Each of them is configured in the `values.yaml`
+file, in the `scheduler` section. After configuring a group, its name can be used in the `address`
 field in the inventory record. All settings specified in the inventory record will be assigned to hosts from the given group,
 unless specific host configuration overrides it.
 
-- Group configuration example and documentation can be found on [Scheduler Configuration](scheduler-configuration.md#define-groups-of-hosts) page.
-- Use of groups in the inventory can be found on [Poller Configuration](poller-configuration.md#configure-inventory) page.
+- A group configuration example and documentation can be found on the [Scheduler Configuration](scheduler-configuration.md#define-groups-of-hosts) page.
+- The use of groups in the inventory can be found on the [Poller Configuration](poller-configuration.md#configure-inventory) page.
 
-If the host is configured in the group and both the group and the single host are included in the inventory (like in the example below),
-configuration for the single host will be ignored in favour of group configuration.
+If the host is configured in the group and both the group and the single host are included in the inventory (like in the example below), the
+If the host is configured in the group and both the group and the single host are included in the inventory (like in the example below), the +configuration for the single host will be ignored in favour of group configuration: ```yaml scheduler: @@ -31,7 +31,7 @@ poller: ``` If the specific host from the group has to be configured separately, first it must be deleted from the group configuration, -and then it can be inserted as a new record in the inventory (like in the example below). +and then it can be inserted as a new record in the inventory (like in the example below): ```yaml scheduler: @@ -47,4 +47,4 @@ poller: address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete example_group_1,,2c,public,,,2000,my_profile2,, 10.202.4.202,,2c,public,,,2000,my_profile1,, -``` \ No newline at end of file +``` diff --git a/docs/configuration/configuring-profiles.md b/docs/configuration/configuring-profiles.md index 695d99df4..01871bb67 100644 --- a/docs/configuration/configuring-profiles.md +++ b/docs/configuration/configuring-profiles.md @@ -1,23 +1,23 @@ # Configuring profiles -Profiles are the units, where you can configure what you want to poll and then assign them to the device. The definition of profile can be found in `values.yaml` file +Profiles are the units where you can configure what you want to poll, and then assign them to the device. The definition of profile can be found in the `values.yaml` file under the `scheduler` section. -Here is the instruction of how to use profiles: [Update Inventory and Profile](../deployment-configuration/#update-inventory-and-profile). +Here are the instructions on how to use profiles: [Update Inventory and Profile](../deployment-configuration/#update-inventory-and-profile). There are two types of profiles in general: -1. Static profile - polling starts when profile is added to `profiles` field in `inventory` of the device +1. Static profile - polling starts when the profile is added to the `profiles` field in the `inventory` of the device. 2. Smart profile - polling starts when configured conditions are fulfilled, and the device to poll from has `smart_profiles` enabled in inventory. -Smart profiles are useful when we have many devices of certain kind, and we don't want to configure all of them "one by one" with static profiles. +Smart profiles are useful when we have many devices of a certain kind, and we don't want to configure each of them individually with static profiles. - In order to configure smart profile do the following: + In order to configure smart profile, do the following: - 1. Choose one of the fields polled from device, most commonly sysDescr is being used - 2. Set the filter to match all the devices of this kind - 3. Setup polling of the profile by enabling smart profiles for devices you want this profile to be polled + 1. Choose one of the fields polled from the device, most commonly sysDescr. + 2. Set the filter to match all the devices of this kind. + 3. Setup polling of the profile by enabling smart profiles for devices you want to be polled. -The template of the profile looks like following: +The template of the profile looks like the following: ```yaml scheduler: @@ -42,7 +42,7 @@ scheduler: - ['SNMPv2-MIB', 'sysUpTime',0] ``` -For example, we have configured two profiles. One is smart, the other one is static: +For example, we have configured two profiles. 
One is smart, and the other one is static:
 
 ```yaml
 scheduler:
@@ -64,7 +64,7 @@ scheduler:
         - ['IP-MIB']
 ```
 
-If we want to enable only `static_profile` polling for host `10.202.4.202`, we will configure inventory like that:
+If we want to enable only `static_profile` polling for the host `10.202.4.202`, we will configure the inventory as follows:
 
 ```yaml
 poller:
@@ -73,7 +73,7 @@ poller:
     10.202.4.202,,2c,public,,,2000,static_profile,f,
 ```
 
-If we want to enable checking `10.202.4.202` device against smart profiles, we need to set `smart_profiles` to `t`:
+If we want to enable checking the `10.202.4.202` device against smart profiles, we need to set `smart_profiles` to `t`:
 
 ```yaml
 poller:
@@ -82,22 +82,22 @@ poller:
     10.202.4.202,,2c,public,,,2000,,t,
 ```
 
-Then if the device `sysDescr` match `'.*linux.*'` filter, `smart_profile` profile will be polled.
+Then, if the device `sysDescr` matches the `'.*linux.*'` filter, the `smart_profile` profile will be polled.
 
 ## varBinds configuration
-`varBinds` short for "variable binding" in SNMP. The combination of an Object Identifier (OID) and a value.
+`varBinds` is short for "variable binding" in SNMP. It is the combination of an Object Identifier (OID) and a value.
 `varBinds` are used for defining what OIDs should be requested from SNMP Agents. `varBinds` is a required
-subsection of each profile. Syntax configuration of `varBinds` looks following:
+subsection of each profile. The syntax configuration of `varBinds` looks like the following:
 
 [ "MIB-Component", "MIB object"[Optional], "MIB index number"[Optional]]
 
- - `MIB-Component` - The SNMP MIB, itself, consists of distinct component MIBs, each of which refers to a specific
-   defined collection of management information that is part of the overall SNMP MIB eg. `SNMPv2-MIB`.
-   If only `MIB-Component` is set then SC4SNMP will get whole subtree.
+ - `MIB-Component` - The SNMP MIB itself consists of distinct component MIBs, each of which refers to a specific
+   defined collection of management information that is part of the overall SNMP MIB, e.g., `SNMPv2-MIB`.
+   If only the `MIB-Component` is set, then SC4SNMP will get the whole subtree.
 - `MIB object` - The SNMP MIB stores only simple data types: scalars and two-dimensional arrays of scalars,
-   called tables. Keywords SYNTAX, ACCESS, and DESCRIPTION as well as other keywords such as STATUS and
-   INDEX is used to define the SNMP MIB managed objects.
+   called tables. The keywords SYNTAX, ACCESS, and DESCRIPTION as well as other keywords such as STATUS and
+   INDEX are used to define the SNMP MIB managed objects.
- `MIB index number` - Define index number for given MIB Object eg. `0`.
 
 Example:
@@ -110,10 +110,10 @@ Example:
 ```
 
 ## Static Profile configuration
-Static Profile are used when they are defined on a list of profiles in inventory configuration in `poller`
+Static Profiles are used when they are defined in the list of profiles in the inventory configuration of the `poller`
 service [Inventory configuration](../poller-configuration/#configure-inventory). Static Profiles are executed
 even if the SmartProfile flag in inventory is set to false.
-To configure Static Profile following value needs to be set in `profiles` section:
+To configure a Static Profile, the following values need to be set in the `profiles` section (a minimal sketch follows this list):
 
 - `ProfileName` - define as subsection key in `profiles`.
 - `frequency` - define interval between executing SNMP gets in second.
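For orientation, here is a minimal sketch of a static profile together with the inventory line that activates it. The profile name `cpu_profile` and its varBinds are hypothetical examples for illustration, not values defined by this chart:

```yaml
scheduler:
  profiles: |
    # hypothetical static profile, polled every 300 seconds
    cpu_profile:
      frequency: 300
      varBinds:
        - ['UCD-SNMP-MIB', 'laLoad']
poller:
  inventory: |
    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
    10.202.4.202,,2c,public,,,2000,cpu_profile,,
```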
@@ -133,11 +133,11 @@ scheduler:
 
 ### Particular kinds of static profiles
 
-Sometimes static profiles have additional functionalities, to be used in some special scenarios.
+Sometimes static profiles have additional functionalities to be used in specific scenarios.
 
 #### WALK profile
 
-If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of type `walk`
+If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of type `walk`:
 ```yaml
 scheduler:
     profiles: |
@@ -147,8 +147,8 @@ scheduler:
       varBinds:
         - ['UDP-MIB']
 ```
-Such profile should be placed in the profiles section of inventory definition. It will be executed with the frequency defined in `walk_interval`.
-In case of multiple profiles of type `walk` will be placed in profiles, the last one will be used.
+This profile should be placed in the profiles section of the inventory definition. It will be executed with the frequency defined in `walk_interval`.
+If multiple profiles of type `walk` are placed in profiles, the last one will be used.
 
 This is how to use `walk` profiles:
 
@@ -159,23 +159,23 @@ poller:
     10.202.4.202,,2c,public,,,2000,small_walk,,
 ```
 
-NOTE: When small walk is configured, you can set up polling only of OIDs belonging to walk profile varBinds.
-Additionally, there are two MIB families that are enabled by default (we need them to create state of the device in the database and poll base profiles): `IF-MIB` and `SNMPv2-MIB`.
-For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, `IF-MIB` and `SNMPv2-MIB` OIDs.
+NOTE: When small walk is configured, you can set up polling only of OIDs belonging to the walk profile varBinds.
+Additionally, there are two MIB families that are enabled by default (we need them to create the state of the device in the database and poll base profiles): `IF-MIB` and `SNMPv2-MIB`.
+For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, `IF-MIB`, and `SNMPv2-MIB` OIDs.
 
 ## SmartProfile configuration
 
 SmartProfile is executed when the SmartProfile flag in inventory is set to true and the condition defined in profile match.
-More information about configuring inventory can be found in [Inventory configuration](../poller-configuration/#configure-inventory)
+More information about configuring inventory can be found in [Inventory configuration](../poller-configuration/#configure-inventory).
 
-To configure Smart Profile following value need to be set in `profiles` section:
+To configure a Smart Profile, the following values need to be set in the `profiles` section:
 
 - `ProfileName` - define as subsection key in `profiles`.
 - `frequency` - define an interval between executing SNMP's gets in second.
 - `condition` - section define conditions to match profile
-  - `type` - key of `condition` section which defines type of condition. Allowed value `base` and `field` (`walk` type is also allowed here, but it's not part of smart profiles)
+  - `type` - the key of the `condition` section which defines the type of condition. The allowed values are `base` and `field` (`walk` type is also allowed here, but it's not part of smart profiles).
     - `base` type of condition will be executed when `SmartProfile` in inventory is set to true.
-  - `field` type of condition will be executed if match `pattern` for defined `field`.
Supported fields:
+  - `field` type of condition will be executed if it matches `pattern` for defined `field`. Supported fields are:
         - "SNMPv2-MIB.sysDescr"
         - "SNMPv2-MIB.sysObjectID"
   - `field` Define field name for condition type field.
@@ -183,7 +183,7 @@ To configure Smart Profile following value need to be set in `profiles` section
     - ".*linux.*"
 - `varBinds` - define var binds to query.
 
-Example of `base` type profile
+Example of `base` type profile:
 ```yaml
 scheduler:
   profiles: |
@@ -196,7 +196,7 @@ scheduler:
         - ['SNMPv2-MIB', 'sysName']
 ```
 
-Example of `field` type profile, also called an automatic profile
+Example of `field` type profile, also called an automatic profile:
 ```yaml
 scheduler:
   profiles: |
@@ -213,11 +213,11 @@ scheduler:
 ```
 
 NOTE: Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. In case you changed frequency, or a profile type, the change will be reflected only after the next walk.
-There is also 5 minute TTL for an inventory pod. Basically, SC4SNMP allows one inventory upgrade and then block updates for the next 5 minutes
+There is also a 5-minute TTL for the inventory pod. Basically, SC4SNMP allows one inventory upgrade and then blocks updates for the next 5 minutes.
 
 ## Custom translations
-If the user wants to use custom names/translations of MIB names, it can be configured under customTranslations section under scheduler config.
-Translations are grouped by MIB family. In the example below IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1
+If the user wants to use custom names/translations of MIB names, it can be configured under the customTranslations section under scheduler config.
+Translations are grouped by MIB family. In the example below, IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1:
 ```yaml
 scheduler:
   customTranslations:
diff --git a/docs/configuration/deployment-configuration.md b/docs/configuration/deployment-configuration.md
index 79ccebfd9..460ee4d98 100644
--- a/docs/configuration/deployment-configuration.md
+++ b/docs/configuration/deployment-configuration.md
@@ -40,3 +40,10 @@ All the components have the `resources` field for adjusting memory resources:
       memory: 2Gi
 ```
 More information about the concept of `resources` can be found in the [kuberentes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+There is an option to create common annotations across all the services. It can be set as follows:
+
+```yaml
+commonAnnotations:
+  annotation_key: annotation_value
+```
\ No newline at end of file
diff --git a/docs/configuration/mongo-configuration.md b/docs/configuration/mongo-configuration.md
index 565485353..af535bce6 100644
--- a/docs/configuration/mongo-configuration.md
+++ b/docs/configuration/mongo-configuration.md
@@ -3,8 +3,8 @@
 Mongo DB is used as the database for keeping schedules.
 
 ### Mongo DB configuration file
-Mongo DB configuration is kept in `values.yaml` file in section `mongodb`.
-`values.yaml` is being used during the installation process for configuring kubernetes values.
+Mongo DB configuration is kept in the `values.yaml` file in the `mongodb` section.
+`values.yaml` is used during the installation process for configuring Kubernetes values.
 
 Example:
 ```yaml
@@ -30,4 +30,4 @@ mongodb:
     enabled: true
 ```
 
-The recommendation is to do not change this setting.
In case of need to change it please follow documentation: [MongoDB on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/mongodb/)
+It is recommended not to change this setting. If it is necessary to change it, see: [MongoDB on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/mongodb/).
diff --git a/docs/configuration/poller-configuration.md b/docs/configuration/poller-configuration.md
index 2a5034df3..e39ac604f 100644
--- a/docs/configuration/poller-configuration.md
+++ b/docs/configuration/poller-configuration.md
@@ -1,19 +1,19 @@
 #Poller Configuration
 Poller is a service which is responsible for querying
-SNMP devices using SNMP GET, SNMP WALK functionality. Poller executes two main types of tasks:
+SNMP devices using the SNMP GET and SNMP WALK functionality. Poller executes two main types of tasks:
 
 - Walk task - executes SNMP walk. SNMP walk is an SNMP application that uses SNMP GETNEXT requests to
 collect SNMP data from the network and infrastructure SNMP-enabled devices, such as switches and routers. It is a time-consuming task,
-which may overload the SNMP device when executing too often. It is used by SC4SNMP to collect and push all OIDs values which provided ACL has access to.
+which may overload the SNMP device when executed too often. It is used by SC4SNMP to collect and push all OID values that the provided ACL has access to.
 
-- Get task - it is a lightweight task whose goal is to query a subset of OIDs defined by the customer. The task serves for a frequent monitoring OIDs like memory or CPU utilization.
+- Get task - it is a lightweight task whose goal is to query a subset of OIDs defined by the customer. The task serves for frequent monitoring of OIDs, like memory or CPU utilization.
 
 Poller has an `inventory`, which defines what and how often SC4SNMP has to poll.
 
 ### Poller configuration file
 
-Poller configuration is kept in `values.yaml` file in a `poller` section.
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
+The poller configuration is kept in the `values.yaml` file in the `poller` section.
+`values.yaml` is used during the installation process for configuring Kubernetes values.
 
 Poller example configuration:
 ```yaml
@@ -30,17 +30,16 @@ poller:
 
 NOTE: header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it.
 
 ### Define log level
-Log level for poller can be set by changing the value for key `logLevel`. Allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
-The default value is `WARNING`
+The log level for poller can be set by changing the value for the key `logLevel`. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
+The default value is `WARNING`.
 
 ### Define usernameSecrets
-Secrets are required to run SNMPv3 polling. To add v3 authentication details, create the k8s Secret object following this
-instruction: [SNMPv3 Configuration](snmpv3-configuration.md), and put its name in `poller.usernameSecrets`.
+Secrets are required to run SNMPv3 polling. To add v3 authentication details, create the k8s Secret object following the instructions in [SNMPv3 Configuration](snmpv3-configuration.md), and put its name in `poller.usernameSecrets`.
 
 ### Configure inventory
-To update inventory, follow instruction: [Update Inventory and Profile](#update-inventory-and-profile).
+To update inventory, see: [Update Inventory and Profile](#update-inventory-and-profile).
-`inventory` section in `poller` has following fields to configure:
+The `inventory` section in `poller` has the following fields to configure:
 
 - `address` [REQUIRED] - IP address which SC4SNMP should connect to collect data from or name of the group of hosts. General information about groups can be found on [Configuring Groups](configuring-groups.md) page.
@@ -71,10 +70,10 @@ Adding new devices for `values.yaml` is quite expensive from the Splunk Connect
 As it interacts with real, networking devices, it requires several checks before applying changes.
 SC4SNMP was designed to prevent changes in inventory task more often than every 5 min.
 
-To apply inventory changes in `values.yaml`, following steps need to be executed:
+To apply inventory changes in `values.yaml`, the following steps need to be executed:
 
 1. Edit `values.yaml`
-2. Check if inventory pod is still running by an execute command
+2. Check if the inventory pod is still running by executing the command:
 
 ```shell
 microk8s kubectl -n sc4snmp get pods | grep inventory
@@ -83,7 +82,7 @@ microk8s kubectl -n sc4snmp get pods | grep inventory
 
 If the command does not return any pods, follow the next step. In another case, wait and execute the command again until the moment when inventory job finishes.
 
-If you really need to apply changes immediately, you can get around the limitation by deleting inventory job, with:
+If you really need to apply changes immediately, you can get around the limitation by deleting the inventory job with:
 
 ```shell
 microk8s kubectl delete job/snmp-splunk-connect-for-snmp-inventory -n sc4snmp
@@ -97,5 +96,5 @@ After running this command, you can proceed with upgrading without a need to wai
 microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
 ```
 
-NOTE: If you decide to change frequency of the profile without changing inventory data, the change will be reflected after
-next walk process for the host. Walk happens every `walk_interval` or on any change in inventory.
+NOTE: If you decide to change the frequency of the profile without changing the inventory data, the change will be reflected after
+the next walk process for the host. The walk happens every `walk_interval`, or on any change in inventory.
diff --git a/docs/configuration/redis-configuration.md b/docs/configuration/redis-configuration.md
index e05570d1d..e66793e29 100644
--- a/docs/configuration/redis-configuration.md
+++ b/docs/configuration/redis-configuration.md
@@ -1,12 +1,12 @@
 #Redis configuration
 
-Recently, RabbitMQ was replaced with Redis as a queue service and periodic task database. The reason for that was to increase SC4SNMP performance and protect against bottlenecks.
+Recently, RabbitMQ was replaced with Redis as a queue service and periodic task database. The reason for this was to increase SC4SNMP performance and protect against bottlenecks.
 
-Redis is a service with is used for both managing periodic tasks and as a queue service for SC4SNMP. It is queuing tasks like SNMP Walk and Poll.
+Redis is a service that both manages periodic tasks and serves as a queue for SC4SNMP. It queues tasks like SNMP Walk and Poll.
 
 ### Redis configuration file
-Redis configuration is kept in `values.yaml` file in section `redis`.
+Redis configuration is kept in the `values.yaml` file in the `redis` section.
 `values.yaml` is being used during the installation process for configuring Kubernetes values.
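For reference, the chart's default `redis` block in `values.yaml` is minimal; it is the same block shown in the values.yaml hunk earlier in this diff:

```yaml
redis:
  architecture: standalone
  auth:
    enabled: false
```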
-In case of need to change it please follow documentation: [Redis on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/redis)
+To edit the configuration, see: [Redis on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/redis)
diff --git a/docs/configuration/sim-configuration.md b/docs/configuration/sim-configuration.md
index 2d29cca92..2fa0b4b86 100644
--- a/docs/configuration/sim-configuration.md
+++ b/docs/configuration/sim-configuration.md
@@ -1,6 +1,6 @@
-# Otel configuration
+# OTEL and Splunk Observability Cloud configuration
 
-Splunk OpenTelemetry Collector is a component that provides an option to send metrics to SignalFx.
+Splunk OpenTelemetry Collector is a component that provides an option to send metrics to Splunk Observability Cloud.
 In order to use it, you must set `enabled` flag in `values.yaml` to `true`:
 
 ```yaml
@@ -9,7 +9,11 @@ sim:
   enabled: true
 ```
 
-Also, you need to specify SignalFx token and realm, so at the end sim element in `values.yaml` looks like this:
+## Token and realm
+
+You need to specify the Splunk Observability Cloud token and realm. There are two ways of configuring them:
+
+1. Pass them in plain text via `values.yaml`, so that at the end the `sim` element looks like this:
 
 ```yaml
 sim:
@@ -18,6 +22,42 @@ sim:
   signalfxRealm: us0
 ```
 
+2. Alternatively, create a microk8s secret yourself and pass its name in the `values.yaml` file. Create the secret:
+
+```
+microk8s kubectl create -n <namespace> secret generic <secret_name> \
+  --from-literal=signalfxToken=<token> \
+  --from-literal=signalfxRealm=<realm>
+```
+
+Modify the `sim.secret` section of `values.yaml`. Disable creation of the secret with `sim.secret.create` and provide the
+`<secret_name>` matching the one from the previous step. Pass it via `sim.secret.name`. For example, for `<secret_name>`=`signalfx`
+the `sim` section would look like:
+
+```yaml
+sim:
+  secret:
+    create: false
+    name: signalfx
+```
+
+Note: After the initial installation, if you change `sim.signalfxToken` and/or `sim.signalfxRealm` and no `sim.secret.name` is given,
+the `sim` pod will detect the update by itself (after the `helm3 upgrade` command) and trigger its recreation. However, when you edit a secret created outside
+of `values.yaml` (given by `sim.secret.name`), you need to roll out the deployment yourself or delete the pod to update the data.
+
+
+### Define annotations
+In case you need to append some annotations to the `sim` service, you can do so by setting `sim.service.annotations`, for example:
+
+```yaml
+sim:
+  service:
+    annotations:
+      annotation_key: annotation_value
+```
+
+## Verify the deployment
+
 After executing `microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace `, the sim pod should be up and running:
diff --git a/docs/configuration/snmpv3-configuration.md b/docs/configuration/snmpv3-configuration.md
index d31d2faf6..6865f0a88 100644
--- a/docs/configuration/snmpv3-configuration.md
+++ b/docs/configuration/snmpv3-configuration.md
@@ -1,9 +1,9 @@
 ### Create SNMP v3 users
 
 Configuration of SNMP v3, when supported by the monitored devices, is the most secure choice available
-for authentication and data privacy.
Each set of credentials will be stored as "Secret" objects in k8s,
+and will be referenced in values.yaml. This allows the secret to be created once, including automation
+by third-party password managers, then consumed without storing sensitive data in plain text.
 
 ```bash
 # <secret_name>=Arbitrary name of the secret often the same as the username or prefixed with "sc4snmp-"
...
 microk8s kubectl create -n <namespace> secret generic <secret_name> \
   --from-literal=privProtocol=<privProtocol>
 ```
 
-Configured credentials can be use in [poller](poller-configuration.md) and [trap](trap-configuration.md) services.
-In services configuration, `secretname` needs to be provided.
+Configured credentials can be used in [poller](poller-configuration.md) and [trap](trap-configuration.md) services.
+In service configuration, `secretname` needs to be provided.
diff --git a/docs/configuration/step-by-step-poll.md b/docs/configuration/step-by-step-poll.md
index e65e78876..e27062d54 100644
--- a/docs/configuration/step-by-step-poll.md
+++ b/docs/configuration/step-by-step-poll.md
@@ -7,10 +7,10 @@ We have 4 hosts we want to poll from:
 3. `10.202.4.203:161`
 4. `10.202.4.204:163`
 
-Let's say, that we're interested mostly in information about interfaces and some CPU related data. For this purposes,
-we need to configure `IF-MIB` family for interfaces, and `UCD-SNMP-MIB` for the CPU.
+Let's say that we're interested mostly in information about interfaces and some CPU related data. For this purpose,
+we need to configure the `IF-MIB` family for interfaces, and `UCD-SNMP-MIB` for the CPU.
 
-We'll do two things under `scheduler` section: define the group from which we want to poll, and the profile of what exactly will be polled:
+We'll do two things under the `scheduler` section: define the group from which we want to poll, and the profile of what exactly will be polled:
 
 ```yaml
 scheduler:
@@ -92,7 +92,7 @@ Successfully connected to http://snmp-mibserver/index.csv
 {"message": "New Record address='10.202.4.204' port=163 version='2c' community='public' secret=None security_engine=None walk_interval=2000 profiles=['switch_profile'] smart_profiles=True delete=False", "time": "2022-09-05T14:30:30.607641", "level": "INFO"}
 ```
 
-In some time (depending of how long does the walk takes), we'll see events under:
+After some time (depending on how long the walk takes), we'll see events under:
 
 ```yaml
 | mpreview index=netmetrics | search profiles=switch_profile
@@ -104,8 +104,8 @@ query in Splunk. When groups are used, we can also use querying by the group nam
 | mpreview index=netmetrics | search group=switch_group
 ```
 
-Keep in mind, that querying by profiles/group in Splunk is only possible in metrics index. Every piece of data being sent
-by SC4SNMP is formed based on MIB file's definition of the SNMP object's index. The object is forwarded to an event index only if it doesn't have any metric value inside.
+Keep in mind that querying by profiles/group in Splunk is only possible in the metrics index. Every piece of data being sent
+by SC4SNMP is formed based on the MIB file's definition of the SNMP object's index. The object is forwarded to an event index only if it doesn't have any metric value inside.
The `raw` metrics in Splunk example is:
@@ -150,4 +150,4 @@ or
 "profiles":"switch_profile",
 "metric_name:sc4snmp.UCD-SNMP-MIB.laIndex":1
 }
-```
\ No newline at end of file
+```
diff --git a/docs/configuration/trap-configuration.md b/docs/configuration/trap-configuration.md
index 9a088e5cb..0e1a18e43 100644
--- a/docs/configuration/trap-configuration.md
+++ b/docs/configuration/trap-configuration.md
@@ -3,8 +3,8 @@ A trap service is a simple server that can handle SNMP traps sent by SNMP device
 
 ### Trap configuration file
 
-Trap configuration is kept in `values.yaml` file in section traps.
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
+The trap configuration is kept in the `values.yaml` file in the `traps` section.
+`values.yaml` is used during the installation process for configuring Kubernetes values.
 
 Trap example configuration:
 ```yaml
@@ -35,8 +35,8 @@ traps:
 ```
 
 ### Define communities
-`communities` define a version of SNMP protocol and SNMP community string which should be used.
-`communities` key is split by protocol version, supported values are `1` and `2c`. Under `version` section, SNMP community string can be defined.
+`communities` defines the version of the SNMP protocol and the SNMP community string that should be used.
+The `communities` key is split by protocol version; the supported values are `1` and `2c`. Under the `version` section, the SNMP community string can be defined.
 
 Example:
 ```yaml
@@ -50,9 +50,9 @@ traps:
 ```
 
 ### Configure user secrets for SNMPv3
-`usernameSecrets` key in the `traps` section define SNMPv3 secrets for trap messages sent by SNMP device. `usernameSecrets` define which secrets
-in "Secret" objects in k8s should be used, as a value it needs to put the name of "Secret" objects.
-More information on how to define the "Secret" object for SNMPv3 can be found in [SNMPv3 Configuration](snmpv3-configuration.md)
+The `usernameSecrets` key in the `traps` section defines SNMPv3 secrets for trap messages sent by an SNMP device. `usernameSecrets` defines which secrets
+in "Secret" objects in k8s should be used; as a value, it needs the name of the "Secret" objects.
+More information on how to define the "Secret" object for SNMPv3 can be found in [SNMPv3 Configuration](snmpv3-configuration.md).
 
 Example:
 ```yaml
@@ -64,9 +64,9 @@ traps:
 
 ### Define security engines ID for SNMPv3
 
-SNMPv3 TRAPs mandate you configuring SNMP Engine ID of the TRAP sending application to USM users table of TRAP receiving
-application for each USM user. It is usually unique per the device, and SC4SNMP as a trap receiver has to be aware of
-which security engine ids to accept. Define all of them under `traps.securityEngineId` in `values.yaml`.
+SNMPv3 TRAPs require configuring the SNMP Engine ID of the TRAP sending application in the USM users table of the TRAP receiving
+application for each USM user. The SNMP Engine ID is usually unique for the device, and SC4SNMP as a trap receiver has to be aware of
+which security engine IDs to accept. Define all of them under `traps.securityEngineId` in `values.yaml`.
 
 By default, it is set to one-element list: `[80003a8c04]`.
 
@@ -77,7 +77,7 @@ traps:
     - "80003a8c04"
 ```
 
-Security engine id is a substitute of `-e` variable in `snmptrap`.
+The security engine ID is a substitute for the `-e` variable in `snmptrap`.
 An example of SNMPv3 trap is:
 
 ```yaml
@@ -95,16 +95,26 @@ traps:
 ```
 
 ### Define number of traps server replica
-`replicaCount` Defines the number of replicas for trap container should be 2x number of nodes.
The default value is `2`.
+`replicaCount` defines the number of replicas for the trap container, which should be 2x the number of nodes. The default value is `2`.
 
 Example:
 ```yaml
 traps:
-  #For production deployments the value should be 2x the number of nodes
+  #For production deployments the value should be at least 2x the number of nodes
   # Minimum 2 for a single node
   # Minimum 6 for multi-node HA
   replicaCount: 2
 ```
 
 ### Define log level
-Log level for trap can be set by changing the value for key `logLevel`. Allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
-The default value is `WARNING`
\ No newline at end of file
+The log level for trap can be set by changing the value for the `logLevel` key. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
+The default value is `WARNING`.
+
+### Define annotations
+In case you need to append some annotations to the `trap` service, you can do so by setting `traps.service.annotations`, for example:
+
+```yaml
+traps:
+  service:
+    annotations:
+      annotation_key: annotation_value
+```
diff --git a/docs/configuration/worker-configuration.md b/docs/configuration/worker-configuration.md
index 8002992c3..0cd1c28a2 100644
--- a/docs/configuration/worker-configuration.md
+++ b/docs/configuration/worker-configuration.md
@@ -1,21 +1,21 @@
 # Worker Configuration
 
-The `worker` is a kubernetes pod which is responsible for the actual execution of polling, processing trap messages and sending
+The `worker` is a Kubernetes pod which is responsible for the actual execution of polling, processing trap messages, and sending
 data to Splunk.
 
 ### Worker types
 
 SC4SNMP has two base functionalities: monitoring traps and polling. These operations are handled by 3 types of workers:
 
-1. `trap` worker consumes all the trap related tasks produced by the trap pod.
+1. The `trap` worker consumes all the trap related tasks produced by the trap pod.
 
-2. `poller` worker consumes all the tasks related to polling.
+2. The `poller` worker consumes all the tasks related to polling.
 
-3. `sender` worker handles sending data to splunk. You need to always have at least one sender pod running.
+3. The `sender` worker handles sending data to Splunk. You always need to have at least one sender pod running.
 
 ### Worker configuration file
 
-Worker configuration is kept in `values.yaml` file in the `worker` section. `worker` has 3 subsections: `poller`, `sender` or `trap`, that refer to the workers' types.
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
+Worker configuration is kept in the `values.yaml` file in the `worker` section. `worker` has 3 subsections, `poller`, `sender`, and `trap`, which refer to the worker types.
+`values.yaml` is used during the installation process for configuring Kubernetes values.
 
 The `worker` default configuration is:
 ```yaml
@@ -53,20 +53,18 @@ worker:
   logLevel: "INFO"
 ```
 
-All parameters are described in [Worker parameters](#worker-parameters) section.
+All parameters are described in the [Worker parameters](#worker-parameters) section.
 
 ### Worker scaling
 
-You can adjust number of worker pods to your needs in two ways: setting fixed value in `replicaCount`
-or enabling `autoscaling` which scales pods automatically.
+You can adjust the number of worker pods in two ways: set a fixed value in `replicaCount`,
+or enable `autoscaling`, which scales pods automatically. A combined sketch is shown below.
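As an illustrative sketch only (the keys match the examples that follow; the replica numbers are placeholders, not sizing recommendations), the two approaches can even be mixed per worker type:

```yaml
worker:
  poller:
    # fixed number of poller pods
    replicaCount: 2
  trap:
    # trap pods scale automatically between minReplicas and maxReplicas
    autoscaling:
      enabled: true
      minReplicas: 2
      maxReplicas: 10
      targetCPUUtilizationPercentage: 80
```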
-#### Real life scenario: I use SC4SNMP for only trap monitoring, I want to use my resources effectively
+#### Real life scenario: I use SC4SNMP only for trap monitoring, and I want to use my resources effectively
 
-If you don't use polling at all, would be the best to set `worker.poller.replicaCount` to `0`.
-Remember, that if you'll want to use polling in the future you need to increase `replicaCount`,
-otherwise it won't work. To monitor traps, adjust `worker.trap.replicaCount` depending on your needs
-and `worker.sender.replicaCount` to send traps to splunk. Usually you need much less sender pods than trap ones.
+If you don't use polling at all, set `worker.poller.replicaCount` to `0`.
+If you want to use polling in the future, you need to increase `replicaCount`. To monitor traps, adjust `worker.trap.replicaCount` depending on your needs and `worker.sender.replicaCount` to send traps to Splunk. Usually you need far fewer sender pods than trap ones.
 
 This is the example of `values.yaml` without using autoscaling:
 
@@ -102,9 +100,9 @@ worker:
       logLevel: "WARNING"
 ```
 
-In the example above both trap and sender pods are autoscaled. During an upgrade process
-`minReplicas` number of pods is created, and then new ones are created only if CPU threshold
-exceeds `targetCPUUtilizationPercentage` which by default is 80%. This solution helps you to keep
+In the example above, both trap and sender pods are autoscaled. During an upgrade process,
+`minReplicas` pods are created first, and then new ones are created only if the CPU threshold
+exceeds the `targetCPUUtilizationPercentage`, which by default is 80%. This solution helps you to keep
 resources usage adjusted to what you actually need.
 
 After helm upgrade process, you will see `horizontalpodautoscaler` in `microk8s kubectl get all -n sc4snmp`:
 
@@ -116,15 +114,15 @@ horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-sender
 horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-trap    Deployment/snmp-splunk-connect-for-snmp-worker-trap    1%/80%    4    10    4    28m
 ```
 
-If you see `/80%` in `TARGETS` section instead of the CPU percentage, you probably don't have `metrics-server` addon enabled.
-Enable it using: `microk8s enable metrics-server`.
+If you see `/80%` in the `TARGETS` section instead of the CPU percentage, you probably don't have the `metrics-server` add-on enabled.
+Enable it using `microk8s enable metrics-server`.
 
 #### Real life scenario: I have a significant delay in polling
 
 Sometimes when polling is configured to be run frequently and on many devices, workers get overloaded
-and there is a delay in delivering data to splunk. To avoid such situations we can scale poller and sender pods.
-Because of the walk cycles (walk is a costly operation ran once for a while), poller workers require more resources
+and there is a delay in delivering data to Splunk. To avoid such situations, we can scale poller and sender pods.
+Because of the walk cycles (walk is a costly operation run once in a while), poller workers require more resources
 for a short time. For this reason, enabling autoscaling is recommended. This is the example of `values.yaml` with autoscaling:
 
@@ -153,7 +151,7 @@ worker:
 ```
 
 Remember, that the system won't scale itself infinitely, there is a finite amount of resources that you can allocate.
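The autoscaling example elided from the hunk above could look roughly like the sketch below; the keys mirror the worker parameters table later in this file, and the replica bounds match the HPA output shown (the numbers are illustrative, not a recommendation):

```yaml
worker:
  trap:
    autoscaling:
      enabled: true
      minReplicas: 4
      maxReplicas: 10
      targetCPUUtilizationPercentage: 80
  sender:
    autoscaling:
      enabled: true
      minReplicas: 2
      maxReplicas: 5
      targetCPUUtilizationPercentage: 80
```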
-By default, every worker has configured following resources:
+By default, every worker has the following resources configured:
 
 ```yaml
   resources:
@@ -167,12 +165,12 @@ By default, every worker has configured following resources:
 
 #### I have autoscaling enabled and experience problems with Mongo and Redis pod
 
 If MongoDB and Redis pods are crashing, and some of the pods are in infinite `Pending` state, that means
-you're over your resources and SC4SNMP cannot scale more. You should decrease number of `maxReplicas` in
-workers, so that it's not going beyond available CPU.
+you're over your resources and SC4SNMP cannot scale more. You should decrease the number of `maxReplicas` in
+workers, so that it's not going beyond the available CPU.
 
 #### I don't know how to set autoscaling parameters and how many replicas I need
 
-The best way to see if pods are overloaded is to run command:
+The best way to see if pods are overloaded is to run:
 
 ```yaml
 microk8s kubectl top pods -n sc4snmp
 ```
 
@@ -226,4 +224,4 @@ Here you can read about Horizontal Autoscaling and how to adjust maximum replica
 | worker.sender.autoscaling.maxReplicas | maximum number of running sender worker pods when autoscaling is enabled | 40 |
 | worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 |
 | worker.sender.resources.limits | the resources limits for poller worker container | {} |
-| worker.sender.resources.requests | the requested resources for poller worker container | {} |
\ No newline at end of file
+| worker.sender.resources.requests | the requested resources for the sender worker container | {} |
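If those defaults need adjusting, a `resources` block can be set per worker type through the keys from the table above; a sketch with placeholder figures (these are not the chart's actual defaults):

```yaml
worker:
  poller:
    resources:
      limits:
        cpu: 500m        # hard ceiling for the poller worker container
        memory: 512Mi
      requests:
        cpu: 250m        # guaranteed share used for pod scheduling
        memory: 256Mi
```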
diff --git a/docs/gettingstarted/mk8s/k8s-microk8s.md b/docs/gettingstarted/mk8s/k8s-microk8s.md
index f04874f8d..d7637e773 100644
--- a/docs/gettingstarted/mk8s/k8s-microk8s.md
+++ b/docs/gettingstarted/mk8s/k8s-microk8s.md
@@ -1,13 +1,15 @@
 # Splunk Connect for SNMP using MicroK8s
 
-Using this deployment option any Linux deployment of Microk8s can be used to support SC4SNMP given the following requirements are met. The minimum requirements below are suitable for proof of value and small installations, actual requirements will differ.
+See the following requirements to use any Linux deployment of Microk8s to support SC4SNMP. The minimum requirements below are suitable for proof of value and small installations, and actual requirements will differ.
+
+Single node minimum:
 
-Single node minimum
 * 4 cores
 * 8 GB of memory per node
 * 50 GB mounted as /
 
-Three node minimum per node
+Three node minimum per node:
+
 * 4 cores
 * 8 GB of memory per node
 * 50 GB mounted as /
@@ -23,14 +25,14 @@ may be found in the MicroK8s [documentation](https://microk8s.io/docs) including
 sudo snap install microk8s --classic
 ```
 
-Add user to the microk8s group to no longer have to use the `sudo` command
+Add a user to the microk8s group so the `sudo` command is no longer necessary:
 ```bash
 sudo usermod -a -G microk8s $USER
 sudo chown -f -R $USER ~/.kube
 su - $USER
 ```
-Wait for Installation of Mk8S to complete
+Wait for the installation of MicroK8s to complete:
 ```bash
 microk8s status --wait-ready
 ```
@@ -38,17 +40,17 @@ microk8s status --wait-ready
 ## Add additional nodes (optional)
 
 * Repeat the steps above for each additional node (minimum total 3)
-* On the first node issue the following, this will return joining instructions
+* On the first node issue the following to return the instructions to join:
 
 ```bash
 microk8s add-node
 ```
 
-* On each additional node use the output from the command above
+* On each additional node, use the output from the command above
 
 ## Install basic services required for sc4snmp
 
-The following commands can be issued from any one node in a cluster
+The following commands can be issued from any one node in a cluster:
 
 ```bash
 sudo systemctl enable iscsid
@@ -59,8 +61,8 @@ microk8s enable metrics-server
 microk8s status --wait-ready
 ```
 
-Install the DNS server for mk8s and configure the forwarding DNS servers replace the IP addressed below (opendns) from
-allowed values for your network
+Install the DNS server for mk8s and configure the forwarding DNS servers. Replace the IP addresses below (opendns) with
+allowed values for your network:
 
 ```bash
 microk8s enable dns:208.67.222.222,208.67.220.220
 ```
 
@@ -73,8 +75,8 @@ Note: when installing Metallb you will be prompted for one or more IPs to use as
 into the cluster. If you plan to enable clustering, this IP should not be assigned to the host (floats). If you do
 not plan to cluster, then this IP should be the IP of your host.
 
-Note2: a single IP in cidr format is x.x.x.x/32 use CIDR or range syntax for single server installations this can be
-the same as the primary ip.
+Note2: a single IP in CIDR format is x.x.x.x/32. Use CIDR or range syntax for single server installations. This can be
+the same as the primary IP.
 
 ```bash
 microk8s enable metallb
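Putting the metallb note into practice, a single-server enablement might look like the sketch below; the address is illustrative, and the `/32` suffix makes it a one-IP pool:

```bash
# Non-interactive form: supply the address pool directly.
# For a single host, the pool can be the host's own primary IP as /32.
microk8s enable metallb:10.0.101.22/32
```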
diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/gettingstarted/sc4snmp-installation.md
index 960f9b19e..556de7fc6 100644
--- a/docs/gettingstarted/sc4snmp-installation.md
+++ b/docs/gettingstarted/sc4snmp-installation.md
@@ -1,8 +1,8 @@
 # SC4SNMP Helm installation
 
-The basic installation process and configuration used in this section are typical
-for single node non HA deployments and do not have resource requests and limits.
-See the configuration sections for mongo, redis, scheduler, worker, and traps for guidance
+The basic installation and configuration process discussed in this section is typical
+for single node non-HA deployments. It does not have resource requests and limits.
+See the mongo, redis, scheduler, worker, and traps configuration sections for guidance
 on production configuration.
 
 ### Offline installation
@@ -112,11 +112,11 @@ mongodb:
     enabled: true
 ```
 
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
+`values.yaml` is used during the installation process for configuring Kubernetes values.
 
 ### Configure Splunk Enterprise or Splunk Cloud Connection
-Splunk Enterprise or Splunk Cloud connection is enabled by default, to disable Splunk Enterprise or Splunk Cloud `splunk.enabled` property must be set to `false`.
-Additionally, connection parameters for Splunk Enterprise or Splunk Cloud needs to be set in `splunk` section:
+The Splunk Enterprise or Splunk Cloud connection is enabled by default. To disable it, set the `splunk.enabled` property to `false`.
+Additionally, the connection parameters for Splunk Enterprise or Splunk Cloud need to be set in the `splunk` section:
 
 | Placeholder | Description | Example |
 |---|---|---|
@@ -136,9 +136,9 @@ Other optional variables can be configured:
 
 ### Configure Splunk Infrastructure Monitoring Connection
 
-Splunk Infrastructure Monitoring is disabled by default, to enable Splunk Infrastructure Monitoring
-`sim.enabled` property must be set to `true`.
-Additionally, connection parameters for Splunk Infrastructure Monitoring need to be set in `sim` section:
+Splunk Infrastructure Monitoring is disabled by default. To enable it, set the
+`sim.enabled` property to `true`.
+Additionally, connection parameters for Splunk Infrastructure Monitoring need to be set in the `sim` section:
 
 | variable | description | default |
 | --- | --- | --- |
@@ -184,7 +184,7 @@ snmp-splunk-connect-for-snmp-inventory-mjccw   0/1     Completed
   in snmp `netops` index.
 
 - Test the trap from a Linux system with SNMP installed. Replace the IP address
-  `10.0.101.22` with the shared IP address above
+  `10.0.101.22` with the shared IP address above.
 
 ``` bash
 apt update
 apt-get install snmp
 snmptrap -v2c -c public 10.0.101.22 123 1.3.6.1.2.1.1.4 1.3.6.1.2.1.1.4 s test
 ```
 
 - Search Splunk: You should see one event per trap command with the host value of the
-  test machine IP address
+  test machine IP address.
 
 ``` bash
 index="netops" sourcetype="sc4snmp:traps"
 ```
 
@@ -201,7 +201,7 @@ index="netops" sourcetype="sc4snmp:traps"
 
 ### Test SNMP Poller
 - Test the Poller by logging into Splunk and confirming the presence of events
-  in snmp `netops` and metrics in `netmetrics` index
+  in snmp `netops` and metrics in `netmetrics` index.
 
 - Test the trap from a Linux system install snmpd.
 
@@ -210,16 +210,15 @@ apt update
 apt-get install snmpd
 ```
 
-- To test SNMP poller, snmpd needs to be configured to listen on the external IP. To enabled listening snmpd to external IP,
-in configuration file: `/etc/snmp/snmpd.conf` replace the IP address `10.0.101.22` with the server IP address where snmpd is configured
-`agentaddress 10.0.101.22,127.0.0.1,[::1]`. Restart snmpd by execute command:
+- To test the SNMP poller, snmpd needs to be configured to listen on the external IP. To enable snmpd to listen on the external IP, go to the `/etc/snmp/snmpd.conf` configuration file, and replace the IP address `10.0.101.22` with the server IP address where snmpd is configured:
+`agentaddress 10.0.101.22,127.0.0.1,[::1]`. Restart snmpd by executing:
 
 ``` bash
 service snmpd stop
 service snmpd start
 ```
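Before wiring SC4SNMP to the agent, it may be worth confirming that snmpd answers on the external address from another machine; a quick sketch using the `snmp` tools installed earlier (target IP as in the examples above):

```bash
# Walk the system subtree of the remote snmpd; any reply confirms the
# agent is reachable on the external IP with the configured community.
snmpwalk -v2c -c public 10.0.101.22 1.3.6.1.2.1.1
```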
-- Configure SC4SNMP Poller to test add IP address which you want to poll. Add configuration entry in `values.yaml` file by
-replacing the IP address `10.0.101.22` with the server IP address where snmpd was configured.
+- To test the SC4SNMP poller, add the IP address which you want to poll. Add the configuration entry into the `values.yaml` file by
+replacing the IP address `10.0.101.22` with the server IP address where snmpd was configured.
 
 ``` bash
 poller:
   inventory: |
@@ -235,10 +234,10 @@ microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/spl
 
 - Check-in Splunk
 
-Before polling starts, SC4SNMP must perform SNMP WALK process on the device. It is run first time after configuring the new device and then in every `walk_interval`.
-Its purpose is to gather all the data and provide meaningful context for the polling records. May be, that your device is so big that walk takes too long and scope of walking must be limited.
-In such cases, enable the small walk using the instruction: [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
-When walk finishes, events appear in Splunk, check it with those queries:
+Before polling starts, SC4SNMP must perform the SNMP WALK process on the device. It is run for the first time after configuring a new device, and then once every `walk_interval`.
+Its purpose is to gather all the data and provide meaningful context for the polling records. It may happen that your device is so large that the walk takes too long and the scope of walking must be limited.
+In such cases, enable the small walk. See: [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
+When the walk finishes, events appear in Splunk. Confirm the walk with the following queries:
 
 ``` bash
 index="netops" sourcetype="sc4snmp:event"
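For orientation, a poller inventory entry has roughly the shape sketched below; the exact column set varies between SC4SNMP versions, so treat the header row as hypothetical and check the poller configuration page for the authoritative format:

```yaml
poller:
  inventory: |
    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
    10.0.101.22,,2c,public,,,1800,,,false
```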
 ### Offline installation
 
@@ -59,7 +59,8 @@ microk8s helm3 upgrade --install sck \
 
 ## Install Splunk OpenTelemetry Collector with HELM for Splunk Observability for Kubernetes
 
-To run Splunk OpenTelemetry Collector on your environment, replace `<>` variables according to the description presented below
+To run Splunk OpenTelemetry Collector on your environment, replace the `<>` variables according to the description presented below:
+
 ```bash
 microk8s helm3 upgrade --install sck
@@ -85,7 +86,7 @@ splunk-otel-collector-chart/splunk-otel-collector
 | ingest_url | Ingest URL from the Splunk Observability Cloud environment | https://ingest.<realm>.signalfx.com |
 | api_url | API URL from the Splunk Observability Cloud environment | https://api.<realm>.signalfx.com |
 
-An example of filled up command is:
+An example of a filled-in command is:
 
 ```bash
 microk8s helm3 upgrade --install sck --set="clusterName=my_cluster"
diff --git a/docs/gettingstarted/splunk-requirements.md b/docs/gettingstarted/splunk-requirements.md
index e2f10b691..4d3b092bd 100644
--- a/docs/gettingstarted/splunk-requirements.md
+++ b/docs/gettingstarted/splunk-requirements.md
@@ -2,6 +2,8 @@
 
 ## Prepare Splunk
 
+See the following prerequisites for Splunk Connect for SNMP.
+
 ### Requirements (Splunk Enterprise/Enterprise Cloud)
 
 1. Manually create the following indexes in Splunk:
@@ -14,12 +16,11 @@
     * netops (event type)
 
 Note: `netmetrics` and `netops` are the default names of SC4SNMP indexes. You can use the index names of your choice and
-reference it in `values.yaml` file later on.
-Parameters and the instruction on how to do it is here: [SC4SNMP Parameters](sc4snmp-installation.md#configure-splunk-enterprise-or-splunk-cloud-connection)
+reference them in the `values.yaml` file later on. See parameters and instructions for details: [SC4SNMP Parameters](sc4snmp-installation.md#configure-splunk-enterprise-or-splunk-cloud-connection).
 
 2. Create or obtain a new Splunk HTTP Event Collector token and the correct HTTPS endpoint.
-3. Verify the token using [curl](https://docs.splunk.com/Documentation/Splunk/8.1.3/Data/FormateventsforHTTPEventCollector) Note: The endpoint must use a publicly trusted certificate authority.
+3. Verify the token using [curl](https://docs.splunk.com/Documentation/Splunk/8.1.3/Data/FormateventsforHTTPEventCollector). Note: The endpoint must use a publicly trusted certificate authority.
 4. The SHARED IP address to be used for SNMP Traps. Note: Simple and POC deployments will use the same IP as the host server. If HA deployment will be used, the IP must be in addition to the management interface of each cluster member.
 5. Obtain the IP address of an internal DNS server that can resolve the Splunk Endpoint.
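Step 3 of the requirements can be done with a single request; a sketch, assuming a hypothetical endpoint and token (substitute your own values):

```bash
# Send a minimal test event to the HEC endpoint; an HTTP 200 response with
# {"text":"Success","code":0} confirms that the token is valid.
curl "https://splunk.example.com:8088/services/collector/event" \
  -H "Authorization: Splunk 00000000-0000-0000-0000-000000000000" \
  -d '{"event": "SC4SNMP HEC smoke test", "sourcetype": "manual"}'
```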
diff --git a/docs/ha.md b/docs/ha.md
index af30e6bfe..1f46bf8a4 100644
--- a/docs/ha.md
+++ b/docs/ha.md
@@ -1,11 +1,11 @@
 ## High Availability Considerations
 
-The SNMP protocol uses UDP as the transport protocol and is subject to network reliability, as
-a constraint. Network architecture should be considered when designing for high availability.
+The SNMP protocol uses UDP as the transport protocol. Network reliability is a constraint.
+Consider network architecture when designing for high availability:
 
-* When using a single node collector ensure automatic recovery from virtual infrastructure i.e. VMware, Openstack, etc.
-* When using a multi-node cluster ensure nodes are not located such that a simple majority of nodes can
-be lost for example consider row, rack, network, power, storage
-* When determining the placement of clusters the closest location by the number of network hops should be utilized.
-* For "data center" applications collection should be local to the data center.
-* Consider IP Anycast
+* When using a single node collector, ensure automatic recovery from virtual infrastructure (e.g. VMware, OpenStack).
+* When using a multi-node cluster, ensure nodes are not located such that a simple majority of nodes can
+be lost. For example, consider row, rack, network, power, and storage.
+* When determining the placement of clusters, the closest location by the number of network hops should be utilized.
+* For "data center" applications, collection should be local to the data center.
+* Consider IP Anycast.
diff --git a/docs/index.md b/docs/index.md
index 4b2f14ebe..3e64ee392 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,14 +1,13 @@
 # Splunk Connect for SNMP
 
 Splunk welcomes your experimentation and feedback. Let your
-account team knows you are testing Splunk Connect for SNMP.
+account team know you are testing Splunk Connect for SNMP.
 
 Splunk Connect for SNMP is an edge-deployed, containerized, and highly
 available solution for collecting SNMP data for Splunk Enterprise,
 Splunk Enterprise Cloud and Splunk Infrastructure Monitoring.
 
-SC4SNMP provides context-full information - not only forwards SNMP data to Splunk, but also combines
-all of pieces into the meaningful objects. For example, you don't need to write queries in order to gather the information about
+SC4SNMP provides context-rich information. It not only forwards SNMP data to Splunk, but also integrates the data into meaningful objects. For example, you don't need to write queries in order to gather information about
 interfaces of the device, because SC4SNMP does that automatically:
 
 [![Interface metrics](images/interface_metrics.png)](images/interface_metrics.png)
 
@@ -23,4 +22,4 @@ Here is a short presentation of how to browse SNMP data in Splunk:
 
 SC4SNMP can also easily monitor trap events sent by different SNMP devices. Trap events are JSON formatted, and are
 being stored under `netops` index.
-[![Trap example](images/trap.png)](images/trap.png)
\ No newline at end of file
+[![Trap example](images/trap.png)](images/trap.png)
diff --git a/docs/javascripts/footer.js b/docs/javascripts/footer.js
new file mode 100644
index 000000000..b52917220
--- /dev/null
+++ b/docs/javascripts/footer.js
@@ -0,0 +1,16 @@
+function legalFooter() {
+    // Build the styled wrapper and the footer paragraph.
+    var copyright = document.createElement('div');
+    copyright.classList.add('md-copyright__highlight');
+    var content = document.createElement('p');
+    content.style.textAlign = 'center';
+    var termsLink = document.createElement('a');
+    termsLink.href = 'https://github.com/splunk/splunk-connect-for-snmp/blob/main/LICENSE';
+    termsLink.innerHTML = 'Apache 2.0';
+    content.append('Splunk Documentation covered by: ');
+    content.append(termsLink);
+    // Wrap the paragraph so the highlight styling actually applies.
+    copyright.append(content);
+
+    var endElement = document.getElementsByTagName('main')[0];
+    endElement.insertAdjacentElement("afterend", copyright);
+}
+
+legalFooter();
\ No newline at end of file
diff --git a/docs/offlineinstallation/offline-microk8s.md b/docs/offlineinstallation/offline-microk8s.md
index bb0b30c7c..246a4df67 100644
--- a/docs/offlineinstallation/offline-microk8s.md
+++ b/docs/offlineinstallation/offline-microk8s.md
@@ -1,15 +1,15 @@
 # Offline Microk8s installation issues
 
 Offline installation of Microk8s is described [here](https://microk8s.io/docs/install-alternatives#heading--offline), but
-there are steps that you need to execute additionally in order to successfully install microk8s offline.
+there are additional steps to install microk8s offline.
 
 ## Importing images
 
 After running:
 
 ```
-snap ack core_{microk8s_version}.assert
-snap install core_{microk8s_version}.snap
+snap ack microk8s_{microk8s_version}.assert
+snap install microk8s_{microk8s_version}.snap --classic
 ```
 
 You should check if the microk8s instance is healthy. Do it with:
 
 ```
 microk8s kubectl get pods -A
 ```
 
-The output most probably will look like:
+The output will probably look like:
 
 ```
 NAMESPACE     NAME                                       READY   STATUS     RESTARTS   AGE
 kube-system   calico-kube-controllers-7c9c8dd885-fg8f2   0/1     Pending    0          14m
 kube-system   calico-node-zg4c4                          0/1     Init:0/3   0          23s
 ```
 
-The pods are in `Pending`/`Init` state because they're trying to download images, what is impossible to do offline.
-In order to make them work you need to download all the images on a different server with an internet connection, pack it up and
-import to microk8s image registry on your offline server.
+The pods are in the `Pending`/`Init` state because they're trying to download images, which is impossible to do offline.
+In order to make them work you need to download all the images on a different server with an internet connection, pack them up, and
+import them to the microk8s image registry on your offline server.
 
-Also, the addons we enable through `microk8s enable {addon}` needs some images to work.
-For example, `microk8s` version `3597` requires this images to work correctly:
+### Packing up images for offline environment
+
+You need to monitor
+
+```commandline
+microk8s kubectl get events -A
+```
+
+to see if `microk8s` fails to pull images, and then import anything it needs.
+An example of such information is:
+
+```commandline
+kube-system 0s Warning Failed pod/calico-node-sc784 Failed to pull image "docker.io/calico/cni:v3.21.4": rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/calico/cni:v3.21.4": failed to resolve reference "docker.io/calico/cni:v3.21.4": failed to do request: Head "https://registry-1.docker.io/v2/calico/cni/manifests/v3.21.4": dial tcp 54.83.42.45:443: i/o timeout
+kube-system 0s Warning Failed pod/calico-node-sc784 Error: ErrImagePull
+```
+
+This shows you that you lack a `docker.io/calico/cni:v3.21.4` image, and need to import it in order to fix the issue.
+
+The process of such action is always the same:
+
+```commandline
+docker pull <image>
+docker save <image> > image.tar
+```
+Transfer the package to the offline lab and execute:
+
+```
+microk8s ctr image import image.tar
+```
+
+
+### Example of the offline installation
+
+For example, `microk8s` version `3597` requires these images to work correctly:
 
 ```commandline
 docker pull docker.io/calico/kube-controllers:v3.21.4
@@ -65,26 +96,12 @@
 microk8s ctr image import pause.tar
 microk8s ctr image import metrics.tar
 ```
 
-NOTE: for other versions of `microk8s`, tags of images may differ. You need to monitor
-
-```commandline
-microk8s kubectl get events -A
-```
-
-to see if `microk8s` fails to pull images, and then import anything it needs. An example of such information is:
-
-```commandline
-kube-system 0s Warning Failed pod/calico-node-sc784 Failed to pull image "docker.io/calico/cni:v3.21.4": rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/calico/cni:v3.21.4": failed to resolve reference "docker.io/calico/cni:v3.21.4": failed to do request: Head "https://registry-1.docker.io/v2/calico/cni/manifests/v3.21.4": dial tcp 54.83.42.45:443: i/o timeout
-kube-system 0s Warning Failed pod/calico-node-sc784 Error: ErrImagePull
-```
-
-This shows you that you lack `docker.io/calico/cni:v3.21.4` image, and need to import it in order to fix the issue.
-
+NOTE: for other versions of `microk8s`, tags of images may differ.
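The pull-and-save step can be scripted for a whole image list; a minimal sketch (the list shown is just the beginning of the `3597` set above, and the tarballs are then imported offline with `microk8s ctr image import`):

```bash
# Pull each required image and save it to a tarball for transfer
# to the offline host.
for image in \
  docker.io/calico/kube-controllers:v3.21.4 \
  docker.io/calico/cni:v3.21.4; do
  docker pull "$image"
  docker save "$image" > "$(basename "$image" | tr ':' '_').tar"
done
```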
 The healthy instance of microk8s, after running:
 
 ```commandline
-microk8s enable storage
+microk8s enable hostpath-storage
 microk8s enable rbac
 microk8s enable metrics-server
 ```
 
@@ -99,11 +116,22 @@
 kube-system   hostpath-provisioner-f57964d5f-zs4sj   1/1   Running   0   2m15s
 kube-system   metrics-server-5f8f64cb86-x7k29        1/1   Running   0   2m15s
 ```
 
+## Enabling DNS and Metallb
+
+The `dns` and `metallb` add-ons don't require importing any images, so you can enable them simply by:
+
+```commandline
+microk8s enable dns
+microk8s enable metallb
+```
+
+More on `metallb` [here](../gettingstarted/mk8s/k8s-microk8s.md#install-metallb).
+
 ## Installing helm3
 
-The additional problem is the installation of `helm3` addon. You need to do a few things to make it work.
+The additional problem is the installation of the `helm3` add-on. You need to do a few things to make it work.
 
-1. Check what is your server's platform with:
+1. Check your server's platform with:
 
 ```commandline
 dpkg --print-architecture
 ```
 
 The output would be for ex.: `amd64`. You need the platform to download the correct version of helm.
 
-2. Download the helm package from `https://get.helm.sh/helm-v3.8.0-linux-{{arch}}.tar.gz` where `{{arch}}` should be
+2. Download the helm package from `https://get.helm.sh/helm-v3.8.0-linux-{{arch}}.tar.gz`, where `{{arch}}` should be
 replaced with the result from the previous command. Example: `https://get.helm.sh/helm-v3.8.0-linux-amd64.tar.gz`
 
 3. Rename package to `helm.tar.gz` and send it to an offline lab.
 
@@ -139,7 +167,9 @@ Save file.
 
 6. Run `microk8s enable helm3`
 
-7. Check if `helm3` was successfully installed with command: `microk8s status --wait-ready`. An example of
+## Verify your instance
+
+Check if all the add-ons were installed successfully with the command `microk8s status --wait-ready`. An example of
 a correct output is:
 
 ```commandline
@@ -149,22 +179,22 @@ high-availability: no
 datastore standby nodes: none
 addons:
   enabled:
+    dns                  # (core) CoreDNS
     ha-cluster           # (core) Configure high availability on the current node
     helm3                # (core) Helm 3 - Kubernetes package manager
     hostpath-storage     # (core) Storage class; allocates storage from host directory
+    metallb              # (core) Loadbalancer for your Kubernetes cluster
     metrics-server       # (core) K8s Metrics Server for API access to service metrics
     rbac                 # (core) Role-Based Access Control for authorisation
     storage              # (core) Alias to hostpath-storage add-on, deprecated
   disabled:
     community            # (core) The community addons repository
     dashboard            # (core) The Kubernetes dashboard
-    dns                  # (core) CoreDNS
     gpu                  # (core) Automatic enablement of Nvidia CUDA
     helm                 # (core) Helm 2 - the package manager for Kubernetes
     host-access          # (core) Allow Pods connecting to Host services smoothly
     ingress              # (core) Ingress controller for external access
     mayastor             # (core) OpenEBS MayaStor
-    metallb              # (core) Loadbalancer for your Kubernetes cluster
     prometheus           # (core) Prometheus operator for monitoring and logging
     registry             # (core) Private image registry exposed on localhost:32000
-```
\ No newline at end of file
+```
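On the machine with internet access, steps 1-3 of the helm3 section can be collapsed into a couple of lines; a sketch, assuming `curl` is available:

```bash
# Detect the platform and download the matching helm build,
# saving it directly under the name expected by the offline steps.
ARCH=$(dpkg --print-architecture)
curl -L "https://get.helm.sh/helm-v3.8.0-linux-${ARCH}.tar.gz" -o helm.tar.gz
```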
diff --git a/docs/offlineinstallation/offline-sc4snmp.md b/docs/offlineinstallation/offline-sc4snmp.md
index 012cdf5da..b791fea16 100644
--- a/docs/offlineinstallation/offline-sc4snmp.md
+++ b/docs/offlineinstallation/offline-sc4snmp.md
@@ -1,17 +1,17 @@
 # Offline SC4SNMP installation
 
 ## Local machine with internet access
 
-To install SC4SNMP offline first some packages must be downloaded from github release and then moved
-to the sc4snmp installation server. Those packages are:
+To install SC4SNMP offline, first, some packages must be downloaded from the GitHub release and then moved
+to the SC4SNMP installation server. Those packages are:
 
 - `dependencies-images.tar`
 - `splunk-connect-for-snmp-chart.tar`
 
-Moreover, SC4SNMP Docker image must be pulled, saved as `.tar` package and then moved to the server as well.
-This process requires Docker installed locally.
+Moreover, the SC4SNMP Docker image must be pulled, saved as a `.tar` package, and then moved to the server as well.
+This process requires Docker to be installed locally.
 
-Image can be pulled from the following repository: `ghcr.io/splunk/splunk-connect-for-snmp/container:<tag>`.
-The latest tag can be found [here](https://github.com/splunk/splunk-connect-for-snmp) under Releases section with label `latest`.
+The image can be pulled from the following repository: `ghcr.io/splunk/splunk-connect-for-snmp/container:<tag>`.
+The latest tag can be found [here](https://github.com/splunk/splunk-connect-for-snmp) under the Releases section with the label `latest`.
 
 Example of docker pull command:
 
@@ -20,16 +20,16 @@
 docker pull ghcr.io/splunk/splunk-connect-for-snmp/container:<tag>
 ```
 
-Then save the image. Directory where this image will be saved can be specified after `>` sign:
+Then save the image. The directory where this image will be saved can be specified after the `>` sign:
 
 ```bash
 docker save ghcr.io/splunk/splunk-connect-for-snmp/container:<tag> > snmp_image.tar
 ```
 
-All three packages `snmp_image.tar`, `dependencies-images.tar` and `splunk-connect-for-snmp-chart.tar` must be moved to the sc4snmp installation server.
+All three packages, `snmp_image.tar`, `dependencies-images.tar`, and `splunk-connect-for-snmp-chart.tar`, must be moved to the SC4SNMP installation server.
 
 ## Installation on the server
 
-On the server all the images must be imported to the microk8s cluster. This can be done with the following command:
+On the server, all the images must be imported to the microk8s cluster. This can be done with the following command:
 
 ```bash
 microk8s ctr image import
@@ -43,7 +43,7 @@
 microk8s ctr image import snmp_image.tar
 ```
 
 Then create `values.yaml`. It's a little different from `values.yaml` used in an online installation.
-The difference are following lines added to prevent automatic image pulling:
+The difference between the two files is the following lines, which are added to prevent automatic image pulling:
 
 ```yaml
 image:
@@ -60,6 +60,7 @@ splunk:
     insecureSSL: "false"
     port: "###SPLUNK_PORT###"
 image:
+  tag: ###TAG###
   pullPolicy: "Never"
 traps:
   communities:
@@ -145,13 +146,17 @@ redis:
     pullPolicy: "Never"
 ```
 
-Next step is to unpack chart package `splunk-connect-for-snmp-chart.tar`. It will result in creating `splunk-connect-for-snmp` directory:
+Fill in the `###` variables according to the description from [online installation](../gettingstarted/sc4snmp-installation.md#configure-splunk-enterprise-or-splunk-cloud-connection).
+
+Additionally, fill in `###TAG###` with the same tag you used before to `docker pull` the SC4SNMP image.
+
+The next step is to unpack the chart package `splunk-connect-for-snmp-chart.tar`. It will result in creating the `splunk-connect-for-snmp` directory:
 
 ```bash
 tar -xvf splunk-connect-for-snmp-chart.tar --exclude='._*'
 ```
 
-Finally run helm install command in the directory where both `values.yaml` and `splunk-connect-for-snmp` directory are located:
+Finally, run the helm install command in the directory where both the `values.yaml` and `splunk-connect-for-snmp` directories are located:
 
 ```bash
 microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
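Filling the placeholders can also be scripted before the install; a sketch with purely illustrative values (adjust the tokens to the ones actually present in your `values.yaml`):

```bash
# Substitute the placeholder tokens before running helm install;
# 8088 and 1.8.2 are examples only, not required values.
sed -i 's/###SPLUNK_PORT###/8088/; s/###TAG###/1.8.2/' values.yaml
```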
diff --git a/docs/offlineinstallation/offline-sck.md b/docs/offlineinstallation/offline-sck.md
index ee5d87075..bec45e4e2 100644
--- a/docs/offlineinstallation/offline-sck.md
+++ b/docs/offlineinstallation/offline-sck.md
@@ -2,11 +2,17 @@
 
 ## Local machine with internet access
 
-To install Splunk OpenTelemetry Collector offline first one must download packed chart `splunk-otel-collector-<version>.tgz`
-from github release where `<version>` is the current OpenTelemetry release tag. This package must be later moved to the installation server.
+To install Splunk OpenTelemetry Collector offline, first one must download the packed chart `splunk-otel-collector-<version>.tgz` and the OTel image `otel_image.tar`
+from the GitHub release, where `<version>` is the current OpenTelemetry release tag. Both packages must be later moved to the installation server.
 
 ## Installation on the server
 
+The OTel image has to be imported to the `microk8s` registry with:
+
+```bash
+microk8s ctr image import otel_image.tar
+```
+
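A quick way to confirm the import landed is to list the registry content; the grep pattern below is a guess at the image name, adjust it to the image you actually imported:

```bash
# List images known to microk8s' containerd and look for the collector.
microk8s ctr images ls | grep -i otel
```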
 Imported package must be unpacked with the following command:
 
 ```bash
diff --git a/docs/planning.md b/docs/planning.md
index f21ebd9b9..64cad2178 100644
--- a/docs/planning.md
+++ b/docs/planning.md
@@ -36,12 +36,12 @@ existing firewall.
 ## Planning Infrastructure
 
 A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with
-16 Core/32 threads x64 and 64 GB ram will be able to handle up to 1500
-SNMP TRAPs per sec.
+16 Core/32 threads x64 and 64 GB RAM will be able to handle up to 1500
+SNMP TRAPs per second.
 
 A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with
-16 Core/32 threads x64 and 64 GB ram is able to handle up to 2750 SNMP varbinds per sec.
-As for events per second visible in Splunk, please remember that single SC4SNMP event can contain more than one varbind inside - auto aggregation/grouping feature (varbinds which are describing same thing ie. network interface will be grouped in one event).
-That is why, depending on configuration the number of events per second may vary.
+16 Core/32 threads x64 and 64 GB RAM is able to handle up to 2750 SNMP varbinds per second.
+As for events per second visible in Splunk, please remember that a single SC4SNMP event can contain more than one varbind, thanks to the auto aggregation/grouping feature (varbinds that describe the same thing, e.g. a network interface, are grouped into one event).
+That is why, depending on the configuration, the number of events per second may vary.
 
 When planning infrastructure for Splunk Connect for SNMP, (SC4SNMP) note the limitations highlighted above.
diff --git a/docs/small-environment.md b/docs/small-environment.md
index ed96ccc4a..1a1cb5031 100644
--- a/docs/small-environment.md
+++ b/docs/small-environment.md
@@ -1,9 +1,9 @@
 # Lightweight SC4SNMP installation
 
-SC4SNMP can be successfully installed in small environments with 2 CPUs and 4 Gb of memory.
+SC4SNMP can be successfully installed in small environments with 2 CPUs and 4 GB of memory.
 One important thing to remember is that Splunk OpenTelemetry Collector for Kubernetes cannot be installed in such a small
-environment along with SC4SNMP. The other difference from normal installation is that `resources` limits must be set for Kubernetes
-pods. Example `values.yaml` with the appropriate resources below:
+environment along with SC4SNMP. The other difference from a normal installation is that the `resources` limits must be set for Kubernetes
+pods. See the example of `values.yaml` with the appropriate resources below:
 
 ```yaml
 splunk:
@@ -135,8 +135,8 @@ inventory:
       cpu: 20m
 ```
 
-The rest of the installation is the same as in [online](gettingstarted/sc4snmp-installation.md) or
+The rest of the installation is the same as in [online](gettingstarted/sc4snmp-installation.md), or
 the [offline](offlineinstallation/offline-sc4snmp.md) installation.
-Keep in mind, that lightweight instance of SC4SNMP won't be able to poll from many devices and may experience delays
-in case of frequent polling.
\ No newline at end of file
+Keep in mind that a lightweight instance of SC4SNMP won't be able to poll from many devices and may experience delays
+if there is frequent polling.
diff --git a/docs/upgrade.md b/docs/upgrade.md
index b602a9715..9beb4df3c 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -7,7 +7,7 @@ To upgrade SC4SNMP to the latest version, simply run the following command:
 microk8s helm3 repo update
 ```
 
-After that, next time you run:
+Afterwards, run:
 
 ```yaml
 microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
@@ -53,4 +53,4 @@ For example:
 
 ```yaml
 microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace --version 1.6.3-beta.13
-```
\ No newline at end of file
+```
diff --git a/mkdocs.yml b/mkdocs.yml
index e4f53a8d4..b3b5244cf 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -16,6 +16,11 @@ plugins:
   - mkdocs-video:
       is_video: True
 
+extra_javascript:
+  - javascripts/footer.js
+
+copyright:
+  Webpages built on GitHub Pages | Github Terms | GitHub Privacy
+
 theme:
   name: "material"
   palette:
diff --git a/pyproject.toml b/pyproject.toml
index 1d37107d1..27d92bda1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "splunk-connect-for-snmp"
-version = "1.8.1"
+version = "1.8.2-beta.7"
 description = ""
 authors = ["omrozowicz-splunk "]
 license = "Apache-2.0"
diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py
index 0563a3b4c..63887a9d8 100644
--- a/splunk_connect_for_snmp/__init__.py
+++ b/splunk_connect_for_snmp/__init__.py
@@ -15,4 +15,4 @@
 #
 
-__version__ = "1.8.1"
+__version__ = "1.8.2-beta.7"