diff --git a/.github/workflows/ci-docker.yaml b/.github/workflows/ci-docker.yaml new file mode 100644 index 000000000..24fbb39a0 --- /dev/null +++ b/.github/workflows/ci-docker.yaml @@ -0,0 +1,29 @@ +name: create-compose-files +on: + push: + branches: + - fix/docker-compose + +jobs: + build: + name: Build + runs-on: ubuntu-latest + permissions: + contents: write + packages: write + strategy: + matrix: + python-version: + - 3.9 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Zip docker_compose directory + run: zip -r docker_compose.zip docker_compose + - name: Create artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: docker_compose.zip + env: + ARTIFACT_NAME: docker_compose diff --git a/.gitignore b/.gitignore index 7f783c156..43d5f3804 100644 --- a/.gitignore +++ b/.gitignore @@ -103,6 +103,7 @@ celerybeat.pid # Environments .env +!docker_compose/.env .venv env/ venv/ diff --git a/docker_compose/.env b/docker_compose/.env new file mode 100644 index 000000000..82cbd1e75 --- /dev/null +++ b/docker_compose/.env @@ -0,0 +1,66 @@ +# Deployment configuration +SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container +SC4SNMP_TAG=latest +SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= +TRAPS_CONFIG_FILE_ABSOLUTE_PATH= +INVENTORY_FILE_ABSOLUTE_PATH= +COREFILE_ABS_PATH= +COREDNS_ADDRESS=172.28.0.255 +SC4SNMP_VERSION=1.10.0-beta.4 + +# Dependencies images +COREDNS_IMAGE=coredns/coredns +COREDNS_TAG=1.11.1 +MIBSERVER_IMAGE=ghcr.io/pysnmp/mibs/container +MIBSERVER_TAG=latest +REDIS_IMAGE=docker.io/bitnami/redis +REDIS_TAG=7.2.1-debian-11-r0 +MONGO_IMAGE=docker.io/bitnami/mongodb +MONGO_TAG=6.0.9-debian-11-r5 + +# Splunk instance configuration +SPLUNK_HEC_HOST= +SPLUNK_HEC_PROTOCOL=https +SPLUNK_HEC_PORT=8088 +SPLUNK_HEC_TOKEN= +SPLUNK_HEC_INSECURESSL=false +SPLUNK_SOURCETYPE_TRAPS=sc4snmp:traps +SPLUNK_SOURCETYPE_POLLING_EVENTS=sc4snmp:event +SPLUNK_SOURCETYPE_POLLING_METRICS=sc4snmp:metric +SPLUNK_HEC_INDEX_EVENTS=netops +SPLUNK_HEC_INDEX_METRICS=netmetrics +SPLUNK_HEC_PATH=/services/collector +SPLUNK_AGGREGATE_TRAPS_EVENTS=false +IGNORE_EMPTY_VARBINDS=false + +# Workers configration +WALK_RETRY_MAX_INTERVAL=180 +WALK_MAX_RETRIES=5 +METRICS_INDEXING_ENABLED=false +POLL_BASE_PROFILES=true +IGNORE_NOT_INCREASING_OIDS= +WORKER_LOG_LEVEL=INFO +UDP_CONNECTION_TIMEOUT=3 +MAX_OID_TO_PROCESS=70 +WORKER_POLLER_CONCURRENCY=4 +WORKER_SENDER_CONCURRENCY=4 +WORKER_TRAP_CONCURRENCY=4 +PREFETCH_POLLER_COUNT=1 +PREFETCH_SENDER_COUNT=30 +PREFETCH_TRAP_COUNT=30 +RESOLVE_TRAP_ADDRESS=false +MAX_DNS_CACHE_SIZE_TRAPS=500 +TTL_DNS_CACHE_TRAPS=1800 + +# Inventory configuration +INVENTORY_LOG_LEVEL=INFO +CHAIN_OF_TASKS_EXPIRY_TIME=500 + +# Traps configuration +SNMP_V3_SECURITY_ENGINE_ID=80003a8c04 +TRAPS_PORT=162 + +# Scheduler configuration +SCHEDULER_LOG_LEVEL=INFO + +#Secrets diff --git a/docker_compose/Corefile b/docker_compose/Corefile new file mode 100644 index 000000000..7ea43e1b2 --- /dev/null +++ b/docker_compose/Corefile @@ -0,0 +1,7 @@ +.:53 { + log + errors + auto + reload + forward . 
8.8.8.8 +} \ No newline at end of file diff --git a/docker_compose/docker-compose-coredns.yaml b/docker_compose/docker-compose-coredns.yaml new file mode 100644 index 000000000..887991b76 --- /dev/null +++ b/docker_compose/docker-compose-coredns.yaml @@ -0,0 +1,15 @@ +version: '3.8' +services: + coredns: + image: ${COREDNS_IMAGE}:${COREDNS_TAG:-latest} + command: ["-conf", "/Corefile"] + container_name: coredns + restart: on-failure + expose: + - '53' + - '53/udp' + volumes: + - '${COREFILE_ABS_PATH}:/Corefile' + networks: + my_network: + ipv4_address: ${COREDNS_ADDRESS} diff --git a/docker_compose/docker-compose-dependencies.yaml b/docker_compose/docker-compose-dependencies.yaml new file mode 100644 index 000000000..2eebf6d16 --- /dev/null +++ b/docker_compose/docker-compose-dependencies.yaml @@ -0,0 +1,40 @@ +version: '3.8' +services: + snmp-mibserver: + image: ${MIBSERVER_IMAGE}:${MIBSERVER_TAG:-latest} + container_name: snmp-mibserver + environment: + - NGINX_ENTRYPOINT_QUIET_LOGS=${NGINX_ENTRYPOINT_QUIET_LOGS:-1} + volumes: + - snmp-mibserver-tmp:/tmp/ + depends_on: + - coredns + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} + + redis: + image: ${REDIS_IMAGE}:${REDIS_TAG:-latest} + container_name: redis + restart: always + environment: + - ALLOW_EMPTY_PASSWORD=yes + depends_on: + - coredns + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} + mongo: + image: ${MONGO_IMAGE}:${MONGO_TAG:-latest} + container_name: mongo + restart: always + depends_on: + - coredns + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} +volumes: + snmp-mibserver-tmp: diff --git a/docker_compose/docker-compose-inventory.yaml b/docker_compose/docker-compose-inventory.yaml new file mode 100644 index 000000000..0c666f546 --- /dev/null +++ b/docker_compose/docker-compose-inventory.yaml @@ -0,0 +1,36 @@ +version: '3.8' +services: + inventory: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-inventory + command: ["inventory"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + + # Inventory configuration + - LOG_LEVEL=${INVENTORY_LOG_LEVEL:-INFO} + - CHAIN_OF_TASKS_EXPIRY_TIME=${CHAIN_OF_TASKS_EXPIRY_TIME:-500} + - CONFIG_FROM_MONGO=${CONFIG_FROM_MONGO:-false} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - ${INVENTORY_FILE_ABSOLUTE_PATH}:/app/inventory/inventory.csv + - inventory-pysnmp-cache-volume:/.pysnmp/ + - inventory-tmp:/tmp/ + restart: on-failure + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} +volumes: + inventory-tmp: + inventory-pysnmp-cache-volume: diff --git a/docker_compose/docker-compose-network.yaml b/docker_compose/docker-compose-network.yaml new file mode 100644 index 000000000..ce09f5a6a --- /dev/null +++ b/docker_compose/docker-compose-network.yaml @@ -0,0 +1,7 @@ +version: '3.8' +networks: + my_network: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 \ No newline at end of file diff --git a/docker_compose/docker-compose-scheduler.yaml b/docker_compose/docker-compose-scheduler.yaml new file mode 100644 index 000000000..c2a951815 --- /dev/null +++ b/docker_compose/docker-compose-scheduler.yaml @@ -0,0 +1,34 @@ +version: '3.8' +services: + scheduler: + image: 
${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-scheduler + command: ["celery", "beat"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + - INVENTORY_REFRESH_RATE=${INVENTORY_REFRESH_RATE:-600} + + # Scheduler configuration + - LOG_LEVEL=${SCHEDULER_LOG_LEVEL:-INFO} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - scheduler-pysnmp-cache-volume:/.pysnmp/ + - scheduler-tmp:/tmp/ + restart: on-failure + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} +volumes: + scheduler-tmp: + scheduler-pysnmp-cache-volume: \ No newline at end of file diff --git a/docker_compose/docker-compose-secrets.yaml b/docker_compose/docker-compose-secrets.yaml new file mode 100644 index 000000000..c1dae5281 --- /dev/null +++ b/docker_compose/docker-compose-secrets.yaml @@ -0,0 +1,2 @@ +secrets: {} +version: '3.8' diff --git a/docker_compose/docker-compose-traps.yaml b/docker_compose/docker-compose-traps.yaml new file mode 100644 index 000000000..4249f73aa --- /dev/null +++ b/docker_compose/docker-compose-traps.yaml @@ -0,0 +1,46 @@ +services: + traps: + command: + - trap + container_name: sc4snmp-traps + depends_on: + - redis + - mongo + - coredns + dns: + - ${COREDNS_ADDRESS} + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + - LOG_LEVEL=${SCHEDULER_LOG_LEVEL:-INFO} + - INVENTORY_REFRESH_RATE=${INVENTORY_REFRESH_RATE:-600} + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SNMP_V3_SECURITY_ENGINE_ID=${SNMP_V3_SECURITY_ENGINE_ID:-80003a8c04} + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + networks: + my_network: null + ports: + - mode: host + protocol: udp + published: ${TRAPS_PORT} + target: 2162 + restart: on-failure + secrets: [] + volumes: + - ${TRAPS_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - traps-pysnmp-cache-volume:/.pysnmp/ + - traps-tmp:/tmp/ +version: '3.8' +volumes: + traps-pysnmp-cache-volume: null + traps-tmp: null diff --git a/docker_compose/docker-compose-worker-poller.yaml b/docker_compose/docker-compose-worker-poller.yaml new file mode 100644 index 000000000..3bf4197da --- /dev/null +++ b/docker_compose/docker-compose-worker-poller.yaml @@ -0,0 +1,58 @@ +services: + worker-poller: + command: + - celery + - worker-poller + container_name: sc4snmp-worker-poller + depends_on: + - redis + - mongo + - coredns + dns: + - ${COREDNS_ADDRESS} + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - 
MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} + - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} + - WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} + - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} + - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} + - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} + - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} + - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} + - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} + - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} + - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} + - WORKER_CONCURRENCY=${WORKER_POLLER_CONCURRENCY:-2} + - PREFETCH_COUNT=${PREFETCH_POLLER_COUNT:-1} + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + networks: + my_network: null + restart: on-failure + secrets: [] + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - worker-poller-pysnmp-cache-volume:/.pysnmp/ + - worker-poller-tmp:/tmp/ +version: '3.8' +volumes: + worker-poller-pysnmp-cache-volume: null + worker-poller-tmp: null diff --git a/docker_compose/docker-compose-worker-sender.yaml b/docker_compose/docker-compose-worker-sender.yaml new file mode 100644 index 000000000..dbb5f68a9 --- /dev/null +++ b/docker_compose/docker-compose-worker-sender.yaml @@ -0,0 +1,60 @@ +version: '3.8' +services: + worker-sender: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-worker-sender + command: ["celery", "worker-sender"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + #- OTEL_METRICS_URL= #If sim enabled + + # Splunk instance configuration + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} + - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} + + # Workers configuration + - 
WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} + - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} + - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} + - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} + - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} + - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} + - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} + - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} + - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} + - WORKER_CONCURRENCY=${WORKER_SENDER_CONCURRENCY:-2} + - PREFETCH_COUNT=${PREFETCH_SENDER_COUNT:-1} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - worker-sender-pysnmp-cache-volume:/.pysnmp/ + - worker-sender-tmp:/tmp/ + restart: on-failure + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} +volumes: + worker-sender-tmp: + worker-sender-pysnmp-cache-volume: \ No newline at end of file diff --git a/docker_compose/docker-compose-worker-trap.yaml b/docker_compose/docker-compose-worker-trap.yaml new file mode 100644 index 000000000..4d1089e57 --- /dev/null +++ b/docker_compose/docker-compose-worker-trap.yaml @@ -0,0 +1,63 @@ +version: '3.8' +services: + worker-trap: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-worker-trap + command: ["celery", "worker-trap"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + #- OTEL_METRICS_URL= #If sim enabled + + # Splunk instance configuration + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} + - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} + + # Workers configuration + - WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} + - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} + - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} + - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} + - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} + - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} + - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} + - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} + - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} + - WORKER_CONCURRENCY=${WORKER_TRAP_CONCURRENCY:-2} + - PREFETCH_COUNT=${PREFETCH_TRAP_COUNT:-1} + - RESOLVE_TRAP_ADDRESS=${RESOLVE_TRAP_ADDRESS:-false} + - MAX_DNS_CACHE_SIZE_TRAPS=${MAX_DNS_CACHE_SIZE_TRAPS:-500} + - TTL_DNS_CACHE_TRAPS=${TTL_DNS_CACHE_TRAPS:-1800} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - 
worker-trap-pysnmp-cache-volume:/.pysnmp/ + - worker-trap-tmp:/tmp/ + restart: on-failure + networks: + my_network: + dns: + - ${COREDNS_ADDRESS} +volumes: + worker-trap-tmp: + worker-trap-pysnmp-cache-volume: \ No newline at end of file diff --git a/docker_compose/manage_secrets.py b/docker_compose/manage_secrets.py new file mode 100644 index 000000000..9ce469b13 --- /dev/null +++ b/docker_compose/manage_secrets.py @@ -0,0 +1,223 @@ +import argparse +import os +from typing import Union +import yaml + + +def human_bool(flag: Union[str, bool], default: bool = False) -> bool: + if flag is None: + return False + if isinstance(flag, bool): + return flag + if flag.lower() in [ + "true", + "1", + "t", + "y", + "yes", + ]: + return True + elif flag.lower() in [ + "false", + "0", + "f", + "n", + "no", + ]: + return False + else: + return default + + +def remove_variables_from_env(file_path, variables_to_remove): + try: + with open(file_path, 'r') as env_file: + lines = env_file.readlines() + + with open(file_path, 'w') as env_file: + for line in lines: + key = line.split('=')[0].strip() + if key not in variables_to_remove: + env_file.write(line) + + print("Variables removed successfully from .env file.") + except Exception as e: + print(f"Error: {e}") + + +def create_secrets(variables, path_to_compose_files, secret_name, worker_poller, traps): + for k, v in variables.items(): + if k != "contextEngineId" and not v: + raise ValueError(f"Value {k} is not set") + + new_secrets = [] + new_secrets_in_workers = [] + + for k, v in variables.items(): + if v: + new_secrets.append( + {"secret_name": f"{secret_name}_{k}", "secret_config": {"environment": f"{secret_name}_{k}"}}) + new_secrets_in_workers.append( + {"source": f"{secret_name}_{k}", "target": f"/app/secrets/snmpv3/{secret_name}/{k}"}) + + # Open secrets file + try: + with open(os.path.join(path_to_compose_files, "docker-compose-secrets.yaml"), 'r') as file: + secrets_file = yaml.load(file, Loader=yaml.FullLoader) + if secrets_file["secrets"] is None: + secrets_file["secrets"] = {} + for new_secret in new_secrets: + if new_secret["secret_name"] in secrets_file["secrets"]: + print(f"Secret {secret_name} already configured. New secret not added.") + return + secrets_file["secrets"][new_secret["secret_name"]] = new_secret["secret_config"] + secrets_file_ready = True + except: + print("Problem with editing docker-compose-secrets.yaml. Secret not added.") + secrets_file_ready = False + + if worker_poller: + try: + # Open poller configuration + with open(os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml"), 'r') as file: + worker_poller_file = yaml.load(file, Loader=yaml.FullLoader) + if 'secrets' not in worker_poller_file['services']['worker-poller']: + worker_poller_file['services']['worker-poller']["secrets"] = [] + worker_poller_file['services']['worker-poller']["secrets"].extend(new_secrets_in_workers) + worker_poller_file_ready = True + except: + print("Problem with editing docker-compose-worker-poller.yaml. 
Secret not added.") + worker_poller_file_ready = False + else: + worker_poller_file_ready = True + + if traps: + try: + # Open traps configuration + with open(os.path.join(path_to_compose_files, "docker-compose-traps.yaml"), 'r') as file: + traps_file = yaml.load(file, Loader=yaml.FullLoader) + if 'secrets' not in traps_file['services']['traps']: + traps_file['services']['traps']["secrets"] = [] + traps_file['services']['traps']["secrets"].extend(new_secrets_in_workers) + traps_file_ready = True + except: + print("Problem with editing docker-compose-traps.yaml. Secret not added.") + traps_file_ready = False + else: + traps_file_ready = True + + if secrets_file_ready and worker_poller_file_ready and traps_file_ready: + with open(os.path.join(path_to_compose_files, "docker-compose-secrets.yaml"), 'w') as file: + yaml.dump(secrets_file, file, default_flow_style=False) + with open(os.path.join(path_to_compose_files, ".env"), 'a') as file: + for k, v in variables.items(): + if v: + file.write(f'\n{secret_name}_{k}={v}') + if worker_poller: + with open(os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml"), 'w') as file: + yaml.dump(worker_poller_file, file, default_flow_style=False) + if traps: + with open(os.path.join(path_to_compose_files, "docker-compose-traps.yaml"), 'w') as file: + yaml.dump(traps_file, file, default_flow_style=False) + + +def delete_secrets(variables, path_to_compose_files, secret_name, worker_poller, traps): + secrets = [] + for key in variables.keys(): + secrets.append(f"{secret_name}_{key}") + + # Open secrets file + with open(os.path.join(path_to_compose_files, "docker-compose-secrets.yaml"), 'r') as file: + secrets_file = yaml.load(file, Loader=yaml.FullLoader) + for secret in secrets: + if secret in secrets_file["secrets"]: + del secrets_file["secrets"][secret] + with open(os.path.join(path_to_compose_files, "docker-compose-secrets.yaml"), 'w') as file: + yaml.dump(secrets_file, file, default_flow_style=False) + + # Delete secrets from .env + try: + with open(os.path.join(path_to_compose_files, ".env"), 'r') as env_file: + lines = env_file.readlines() + with open(os.path.join(path_to_compose_files, ".env"), 'w') as env_file: + lines_to_write = [] + for line in lines: + key = line.split('=')[0].strip() + if key not in secrets: + lines_to_write.append(line.strip()) + for i, line in enumerate(lines_to_write): + if i < len(lines_to_write) - 1: + env_file.write(f"{line}\n") + else: + env_file.write(line) + except Exception as e: + print(f"Error: {e}") + + if worker_poller: + # Open poller configuration + with open(os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml"), 'r') as file: + worker_poller_file = yaml.load(file, Loader=yaml.FullLoader) + worker_poller_file['services']['worker-poller']["secrets"] = list( + filter(lambda el: el["source"] not in secrets, + worker_poller_file['services']['worker-poller']["secrets"])) + with open(os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml"), 'w') as file: + yaml.dump(worker_poller_file, file, default_flow_style=False) + if traps: + # Open traps configuration + with open(os.path.join(path_to_compose_files, "docker-compose-traps.yaml"), 'r') as file: + traps_file = yaml.load(file, Loader=yaml.FullLoader) + traps_file['services']['traps']["secrets"] = list( + filter(lambda el: el["source"] not in secrets, + traps_file['services']['traps']["secrets"])) + with open(os.path.join(path_to_compose_files, "docker-compose-traps.yaml"), 'w') as file: + yaml.dump(traps_file, file, 
default_flow_style=False) + + +def main(): + parser = argparse.ArgumentParser(description='Manage secrets in docker compose') + parser.add_argument('--delete', default='false', help='If true, delete the secret') + parser.add_argument('--secret_name', help='Secret name') + parser.add_argument('--path_to_compose', help='Path to dockerfiles') + parser.add_argument('--worker_poller', default='true', help='Add secret to worker poller') + parser.add_argument('--traps', default='true', help='Add secret to traps') + parser.add_argument('--userName', default='', help='SNMPV3 username') + parser.add_argument('--privProtocol', default='', help='SNMPV3 privProtocol') + parser.add_argument('--privKey', default='', help='SNMPV3 privKey') + parser.add_argument('--authProtocol', default='', help='SNMPV3 authProtocol') + parser.add_argument('--authKey', default='', help='SNMPV3 authKey') + parser.add_argument('--contextEngineId', default='', help='SNMPV3 contextEngineId') + + args = parser.parse_args() + + delete_secret = human_bool(args.delete) + secret_name = args.secret_name + path_to_compose_files = args.path_to_compose + worker_poller = human_bool(args.worker_poller) + traps = human_bool(args.traps) + variables = { + "userName": args.userName, + "privProtocol": args.privProtocol, + "privKey": args.privKey, + "authProtocol": args.authProtocol, + "authKey": args.authKey, + "contextEngineId": args.contextEngineId + } + + if not os.path.exists(path_to_compose_files): + print("Path to compose files doesn't exist") + return + if not secret_name: + print("Secret name not specified") + return + + if not delete_secret: + try: + create_secrets(variables, path_to_compose_files, secret_name, worker_poller, traps) + except ValueError as e: + print(e) + else: + delete_secrets(variables, path_to_compose_files, secret_name, worker_poller, traps) + + +if __name__ == "__main__": + main() diff --git a/docs/dockercompose/1-install-docker.md b/docs/dockercompose/1-install-docker.md new file mode 100644 index 000000000..7951d433e --- /dev/null +++ b/docs/dockercompose/1-install-docker.md @@ -0,0 +1,5 @@ +# Install Docker + +To install `Docker` in your environment follow steps from the `Install using the apt repository` section from +the Docker [documentation](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Install the +latest version. \ No newline at end of file diff --git a/docs/dockercompose/2-download-package.md b/docs/dockercompose/2-download-package.md new file mode 100644 index 000000000..3b8dcb706 --- /dev/null +++ b/docs/dockercompose/2-download-package.md @@ -0,0 +1,35 @@ +# Download package with docker compose files + +## Downloading a package +Currently, docker compose installation is not released. Package with the necessary files can be downloaded form +the GitHub branch [fix/docker-compose](https://github.com/splunk/splunk-connect-for-snmp/pull/937/checks). After entering +this link, click the `create-compose-files` workflow which can be seen on the left side: + +![Workflows](../images/dockercompose/workflows.png){style="border:2px solid; width:500px; height:auto" } + +
+ +Then click the `Summary` button on the left side. At the bottom of the page, in the `Artifacts` section, there will be a +`docker_compose` package. Download it and unzip it in your environment. + +![Workflows](../images/dockercompose/artifact.png){style="border:2px solid; width:800px; height:auto" } + +
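Since the artifact is delivered as a zip archive, it can be unpacked with any zip tool. A minimal sketch, assuming the downloaded file is named `docker_compose.zip` and that unzipping it yields the `docker_compose` directory (the actual file name may differ):

```shell
# Unpack the downloaded artifact and enter the compose directory
unzip docker_compose.zip
cd docker_compose
```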
+ +## Deploy the app +After following the rest of the instructions with the configuration details, application can be deployed by running the +following command inside the `docker_compose` directory: + +```shell +sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +``` + +Every time any update is made to the configuration, the same command can be run to apply these changes. + +## Uninstall the app + +To uninstall the app, run the following command inside the `docker_compose` directory: + +```shell +sudo docker compose $(find docker* | sed -e 's/^/-f /') down +``` \ No newline at end of file diff --git a/docs/dockercompose/3-inventory-configuration.md b/docs/dockercompose/3-inventory-configuration.md new file mode 100644 index 000000000..6dc4cd5a3 --- /dev/null +++ b/docs/dockercompose/3-inventory-configuration.md @@ -0,0 +1,12 @@ +# Inventory configuration + +Inventory configuration is stored in the `inventory.csv` file. Structure of this file looks the same as the structure +of the `poller.inventory` section in `values.yaml` file. Documentation of this section can be found [here](../configuration/poller-configuration.md#configure-inventory). + +## Example of the configuration + +```csv +address,port,version,community,secret,securityEngine,walk_interval,profiles,smart_profiles,delete +0.0.0.0,161,2c,public,,,1800,small_walk;test_profile,t, +my_group,161,3,,my_secret,,1800,single_metric,t, +``` \ No newline at end of file diff --git a/docs/dockercompose/4-scheduler-configuration.md b/docs/dockercompose/4-scheduler-configuration.md new file mode 100644 index 000000000..42bc94be4 --- /dev/null +++ b/docs/dockercompose/4-scheduler-configuration.md @@ -0,0 +1,67 @@ +# Scheduler configuration + +Scheduler configuration is stored in the `schduler-config.yaml` file. This file has the following sections: + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +customTranslations: +profiles: +groups: +``` + +- `communities`: communities used for version `1` and `2c` of the `snmp`. The default one is `public`. +- `customTranslations`: configuration of the custom translations. Configuration of this section looks the same as in the `values.yaml` in `scheduler.customTranslations` section, which can be checked in the documentation [here](../configuration/configuring-profiles.md#custom-translations). +- `profiles`: configuration of the profiles. Configuration of this section looks the same as in the `values.yaml` in `scheduler.profiles` section, which can be checked in the documentation [here](../configuration/configuring-profiles.md). +- `groups`: configuration of the groups. Configuration of this section looks the same as in the `values.yaml` in `scheduler.groups` section, which can be checked in the documentation [here](../configuration/configuring-groups.md). 
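For context on how this file reaches the containers: the compose files in this package mount it through the `SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH` variable from `.env`, and the services read it from the path given in `CONFIG_PATH`. A trimmed-down fragment of `docker-compose-scheduler.yaml` from this change set shows the wiring (the same mount is used by the inventory and worker services):

```yaml
services:
  scheduler:
    environment:
      - CONFIG_PATH=/app/config/config.yaml
    volumes:
      - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml
```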
+ +## Example of the configuration + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +customTranslations: + IF-MIB: + ifInDiscards: myCustomName1 + ifOutErrors: myCustomName2 + SNMPv2-MIB: + sysDescr: myCustomName3 +profiles: + small_walk: + condition: + type: "walk" + varBinds: + - [ 'IP-MIB' ] + - [ 'IF-MIB' ] + - [ 'TCP-MIB' ] + - [ 'UDP-MIB' ] + multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - [ 'IF-MIB', 'ifOutDiscards' ] +groups: + group1: + - address: 18.116.10.255 + port: 1163 +``` diff --git a/docs/dockercompose/5-traps-configuration.md b/docs/dockercompose/5-traps-configuration.md new file mode 100644 index 000000000..9ec5edf14 --- /dev/null +++ b/docs/dockercompose/5-traps-configuration.md @@ -0,0 +1,33 @@ +# Traps configuration + +Scheduler configuration is stored in the `traps-config.yaml` file. This file has the following sections: + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +usernameSecrets: [] +``` + +- `communities`: communities used for version `1` and `2c` of the snmp. The default one is `public`. +- `usernameSecrets`: names of the secrets configured in docker used for `snmp v3` traps . + +## Example of the configuration + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +usernameSecrets: + - my_secret +``` \ No newline at end of file diff --git a/docs/dockercompose/6-env-file-configuration.md b/docs/dockercompose/6-env-file-configuration.md new file mode 100644 index 000000000..ab80e80c9 --- /dev/null +++ b/docs/dockercompose/6-env-file-configuration.md @@ -0,0 +1,89 @@ +# .env file configuration + +Inside the directory with the docker compose files, there is a `.env`. Variables can be divided into few sections. + +## Deployment section + +| Variable | Description | +|---------------------------------------|------------------------------------------------------------------------------------------------------| +| `SC4SNMP_IMAGE` | The registry and name of the SC4SNMP image to pull | +| `SC4SNMP_TAG` | SC4SNMP image tag to pull | +| `SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [schduler-config.yaml](./4-scheduler-configuration.md) file | +| `TRAPS_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [traps-config.yaml](./5-traps-configuration.md) file | +| `INVENTORY_FILE_ABSOLUTE_PATH` | Absolute path to [inventory.csv](./3-inventory-configuration.md) file | +| `COREFILE_ABS_PATH` | Absolute path to Corefile used by coreDNS. Default Corefile can be found inside the `docker_compose` | +| `COREDNS_ADDRESS` | IP address of the coredns inside docker network. 
Shouldn’t be changed | +| `SC4SNMP_VERSION` | Version of SC4SNMP | + +## Dependencies images section + +| Variable | Description | +|-------------------|--------------------------------------| +| `COREDNS_IMAGE` | Registry and name of Coredns image | +| `COREDNS_TAG` | Coredns image tag to pull | +| `MIBSERVER_IMAGE` | Registry and name of Mibserver image | +| `MIBSERVER_TAG` | Mibserver image tag to pull | +| `REDIS_IMAGE` | Registry and name of Redis image | +| `REDIS_TAG` | Redis image tag to pull | +| `MONGO_IMAGE` | Registry and name of MongoDB image | +| `MONGO_TAG` | MongoDB image tag to pull | + +## Splunk instance section + +| Variable | Description | +|-------------------------------------|-------------------------------------------------------------------------------------------| +| `SPLUNK_HEC_HOST` | IP address or a domain name of a Splunk instance to send data to | +| `SPLUNK_HEC_PROTOCOL` | The protocol of the HEC endpoint: `https` or `http` | +| `SPLUNK_HEC_PORT` | The port of the HEC endpoint | +| `SPLUNK_HEC_TOKEN` | Splunk HTTP Event Collector token | +| `SPLUNK_HEC_INSECURESSL` | Whether to skip checking the certificate of the HEC endpoint when sending data over HTTPS | +| `SPLUNK_SOURCETYPE_TRAPS` | Splunk sourcetype for trap events | +| `SPLUNK_SOURCETYPE_POLLING_EVENTS` | Splunk sourcetype for non-metric polling events | +| `SPLUNK_SOURCETYPE_POLLING_METRICS` | Splunk sourcetype for metric polling events | +| `SPLUNK_HEC_INDEX_EVENTS` | Name of the Splunk event index | +| `SPLUNK_HEC_INDEX_METRICS` | Name of the Splunk metrics index | +| `SPLUNK_HEC_PATH` | Path for the HEC endpoint | +| `SPLUNK_AGGREGATE_TRAPS_EVENTS` | When set to true makes traps events collected as one event inside splunk | +| `IGNORE_EMPTY_VARBINDS` | Details can be found [here](../bestpractices.md#empty-snmp-response-message-problem) | + +## Workers section + +| Variable | Description | +|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------| +| `WALK_RETRY_MAX_INTERVAL` | Maximum time interval between walk attempts | +| `WALK_MAX_RETRIES` | Maximum number of walk retries | +| `METRICS_INDEXING_ENABLED` | Details can be found [here](../configuration/poller-configuration.md#append-oid-index-part-to-the-metrics) | +| `POLL_BASE_PROFILES` | Enable polling base profiles (with IF-MIB and SNMPv2-MIB) | +| `IGNORE_NOT_INCREASING_OIDS` | Ignoring `occurred: OID not increasing` issues for hosts specified in the array, ex: IGNORE_NOT_INCREASING_OIDS=127.0.0.1:164,127.0.0.6 | +| `WORKER_LOG_LEVEL` | Logging level of the workers, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | +| `UDP_CONNECTION_TIMEOUT` | Timeout in seconds for SNMP operations | +| `MAX_OID_TO_PROCESS` | Sometimes SNMP Agent cannot accept more than X OIDs per once, so if the error "TooBig" is visible in logs, decrease the number of MAX_OID_TO_PROCESS | +| `WORKER_POLLER_CONCURRENCY` | Minimum number of threads in the poller container | +| `WORKER_SENDER_CONCURRENCY` | Minimum number of threads in the sender container | +| `WORKER_TRAP_CONCURRENCY` | Minimum number of threads in the trap container | +| `PREFETCH_POLLER_COUNT` | How many tasks are consumed from the queue at once in the poller container | +| `PREFETCH_SENDER_COUNT` | How many tasks are consumed from the queue at once in the sender container | +| `PREFETCH_TRAP_COUNT` | How many tasks are consumed from the queue at once in the 
trap container | +| `RESOLVE_TRAP_ADDRESS` | Use reverse dns lookup for trap IP address and send the hostname to Splunk | +| `MAX_DNS_CACHE_SIZE_TRAPS` | If RESOLVE_TRAP_ADDRESS is set to true, this is the maximum number of records in cache | +| `TTL_DNS_CACHE_TRAPS` | If RESOLVE_TRAP_ADDRESS is set to true, this is the time to live of the cached record in seconds | + +## Inventory section + +| Variable | Description | +|------------------------------|---------------------------------------------------------------------------------------------------| +| `INVENTORY_LOG_LEVEL` | Logging level of the inventory, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | +| `CHAIN_OF_TASKS_EXPIRY_TIME` | Tasks expirations time in seconds | + +## Traps section + +| Variable | Description | +|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SNMP_V3_SECURITY_ENGINE_ID` | SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending application for the USM users table of the TRAP receiving application for each USM user, ex: SNMP_V3_SECURITY_ENGINE_ID=80003a8c04,aab123456 | +| `TRAPS_PORT` | External port exposed for traps server | + +## Scheduler section + +| Variable | Description | +|-----------------------|---------------------------------------------------------------------------------------------------| +| `SCHEDULER_LOG_LEVEL` | Logging level of the scheduler, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | \ No newline at end of file diff --git a/docs/dockercompose/7-snmpv3-secrets.md b/docs/dockercompose/7-snmpv3-secrets.md new file mode 100644 index 000000000..02fb35de9 --- /dev/null +++ b/docs/dockercompose/7-snmpv3-secrets.md @@ -0,0 +1,77 @@ +# SNMPv3 secrets + +Creating a secret requires updating configuration of several docker compose files. To simplify this process, inside the +`docker_compose` package there is a `manage_secrets.py` file which will automatically manage secrets. + +## Creating a new secret + +To create a new secret, `manage_secrets.py` must be run with the following flags: + +| Flag | Description | +|---------------------|--------------------------------------------------------------------------------| +| `--secret_name` | New secret name | +| `--path_to_compose` | Absolute path to directory with docker compose files | +| `--worker_poller` | \[OPTIONAL\] Add new secrets to worker poller. Default value is set to 'true'. | +| `--traps` | \[OPTIONAL\] Add new secrets to traps server. Default value is set to 'true'. | +| `--userName` | SNMPv3 userName | +| `--privProtocol` | SNMPv3 privProtocol | +| `--privKey` | SNMPv3 privKey | +| `--authProtocol` | SNMPv3 authProtocol | +| `--authKey` | SNMPv3 authKey | +| `--contextEngineId` | \[OPTIONAL\] SNMPv3 engine id | + + +This script, apart from updating configuration files, creates environmental variables with values of the secret at the +end of the `.env` file in the `docker_compose` directory. To apply these secrets run the +`sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d` command inside the `docker_compose` directory. After running this command, plain text secrets +from the `.env` file can be deleted. One important thing is that if any change in `.env` is made, these secrets must be +recreated ([delete](#deleting-a-secret) an existing secret and create it once again). 
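For reference, based on how `manage_secrets.py` builds the entries (see the script earlier in this change set), a secret named `my_secret` that carries a `userName` value ends up roughly as follows in `docker-compose-secrets.yaml` and in the `secrets` list of the worker poller and traps services. This is an illustrative sketch, not generated output:

```yaml
# docker-compose-secrets.yaml (sketch)
secrets:
  my_secret_userName:
    environment: my_secret_userName

# docker-compose-worker-poller.yaml / docker-compose-traps.yaml (sketch)
services:
  worker-poller:
    secrets:
      - source: my_secret_userName
        target: /app/secrets/snmpv3/my_secret/userName
```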
+ +### Example of creating a secret: +```shell +python3 --path_to_compose \ +--secret_name my_secret \ +--userName r-wuser \ +--privProtocol AES \ +--privKey admin1234 \ +--authProtocol SHA \ +--authKey admin1234 \ +--contextEngineId 090807060504037 +``` + +Inside `docker_compose` directory run : + +```shell +sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +``` + +Now, the following lines from the `.env` can be deleted: + +```.env +my_secret_userName=r-wuser +my_secret_privProtocol=AES +my_secret_privKey=admin1234 +my_secret_authProtocol=SHA +my_secret_authKey=admin1234 +my_secret_contextEngineId=090807060504037 +``` + +## Deleting a secret + +To create a secret, `manage_secrets.py` must be run with the following flags: + +| Flag | Description | +|---------------------|------------------------------------------------------| +| `--secret_name` | Secret name | +| `--path_to_compose` | Absolute path to directory with docker compose files | +| `--delete` | Set this flag to true to delete the secret | + +This will delete the secret with a given name from all docker compose files. Also, if this secret hasn't been deleted +from `.env` file, it will be also deleted from there. + +### Example of deleting a secret: +```shell +python3 --path_to_compose \ +--secret_name my_secret \ +--delete true +``` \ No newline at end of file diff --git a/docs/dockercompose/8-offline-installation.md b/docs/dockercompose/8-offline-installation.md new file mode 100644 index 000000000..8886e8d70 --- /dev/null +++ b/docs/dockercompose/8-offline-installation.md @@ -0,0 +1,57 @@ +# Offline installation + +In order to install SC4SNMP using docker compose in the offline environment, several docker images must be imported to +docker. These images can be found in `.env` file: + +- `SC4SNMP_IMAGE` and `SC4SNMP_TAG` in `Deployment configuration` section +- `COREDNS_IMAGE` and `COREDNS_TAG` in `Dependencies images` section +- `MIBSERVER_IMAGE` and `MIBSERVER_TAG` in `Dependencies images` section +- `REDIS_IMAGE` and `REDIS_TAG` in `Dependencies images` section +- `MONGO_IMAGE` and `MONGO_TAG` in `Dependencies images` section + +These images must be downloaded in the online environment, saved to `.tar` archive and moved to the offline environment. 
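To keep the transfer list in sync with `.env`, the image references can also be read from that file instead of being copied by hand. A small bash sketch, assuming the `*_IMAGE`/`*_TAG` naming convention used in this package:

```shell
#!/usr/bin/env bash
# Sketch: print the image:tag pairs defined in .env so they can be pulled and saved in one go
set -a; source ./.env; set +a
for name in SC4SNMP COREDNS MIBSERVER REDIS MONGO; do
  image_var="${name}_IMAGE"; tag_var="${name}_TAG"
  echo "${!image_var}:${!tag_var}"
done
```

The printed references can then be passed to `docker pull` and `docker save`, as in the steps below.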
+ +## Steps to install necessary images + +Suppose that `.env` contains the following images: + +```.env +SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container +SC4SNMP_TAG=latest + +COREDNS_IMAGE=coredns/coredns +COREDNS_TAG=1.11.1 + +MIBSERVER_IMAGE=ghcr.io/pysnmp/mibs/container +MIBSERVER_TAG=latest + +REDIS_IMAGE=docker.io/bitnami/redis +REDIS_TAG=7.2.1-debian-11-r0 + +MONGO_IMAGE=docker.io/bitnami/mongodb +MONGO_TAG=6.0.9-debian-11-r5 +``` + +These images must be downloaded in the online environment: + +```shell +docker pull ghcr.io/splunk/splunk-connect-for-snmp/container:latest +docker pull coredns/coredns:1.11.1 +docker pull ghcr.io/pysnmp/mibs/container:latest +docker pull docker.io/bitnami/redis:7.2.1-debian-11-r0 +docker pull docker.io/bitnami/mongodb:6.0.9-debian-11-r5 +``` + +Next step is to save these images to `sc4snmp_offline_images.tar` archive: +```shell +docker save ghcr.io/splunk/splunk-connect-for-snmp/container:latest \ +coredns/coredns:1.11.1 \ +ghcr.io/pysnmp/mibs/container:latest \ +docker.io/bitnami/redis:7.2.1-debian-11-r0 \ +docker.io/bitnami/mongodb:6.0.9-debian-11-r5 > sc4snmp_offline_images.tar +``` + +After moving `sc4snmp_offline_images.tar` archive to the offline environment, images can be loaded to docker: +```shell +docker load --input sc4snmp_offline_images.tar +``` \ No newline at end of file diff --git a/docs/images/dockercompose/artifact.png b/docs/images/dockercompose/artifact.png new file mode 100644 index 000000000..06cde7e65 Binary files /dev/null and b/docs/images/dockercompose/artifact.png differ diff --git a/docs/images/dockercompose/workflows.png b/docs/images/dockercompose/workflows.png new file mode 100644 index 000000000..378cd1271 Binary files /dev/null and b/docs/images/dockercompose/workflows.png differ diff --git a/mkdocs.yml b/mkdocs.yml index acff02b53..78bffce69 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -44,7 +44,7 @@ nav: - Deployment: "configuration/deployment-configuration.md" - Polling: - Poller: "configuration/poller-configuration.md" - - Scheduler: "configuration/scheduler-configuration.md" + - Scheduler: "configuration/4-scheduler-configuration.md" - Configuring Profiles: "configuration/configuring-profiles.md" - Configuring Groups: "configuration/configuring-groups.md" - Step by Step polling example: "configuration/step-by-step-poll.md" @@ -66,6 +66,15 @@ nav: - Configuring Groups: "gui/groups-gui.md" - Configuring Inventory: "gui/inventory-gui.md" - Apply changes: "gui/apply-changes.md" + - Docker compose: + - Install Docker: "dockercompose/1-install-docker.md" + - Download package with docker compose files: "dockercompose/2-download-package.md" + - Inventory configuration: "dockercompose/3-inventory-configuration.md" + - Scheduler configuration: "dockercompose/4-scheduler-configuration.md" + - Traps configuration: "dockercompose/5-traps-configuration.md" + - .env file configuration: "dockercompose/6-env-file-configuration.md" + - SNMPv3 secrets configuration: "dockercompose/7-snmpv3-secrets.md" + - Offline installation: "dockercompose/8-offline-installation.md" - Lightweight installation: "small-environment.md" - Planning: "planning.md" - Security: "security.md"