diff --git a/cdk-lib/capture-stacks/capture-bucket-stack.ts b/cdk-lib/capture-stacks/capture-bucket-stack.ts index d281cb4..d6bc70c 100644 --- a/cdk-lib/capture-stacks/capture-bucket-stack.ts +++ b/cdk-lib/capture-stacks/capture-bucket-stack.ts @@ -4,7 +4,7 @@ import * as kms from 'aws-cdk-lib/aws-kms'; import * as s3 from 'aws-cdk-lib/aws-s3'; import * as ssm from 'aws-cdk-lib/aws-ssm'; -import * as plan from '../core/capacity-plan'; +import * as plan from '../core/context-types'; import { ProductStack } from 'aws-cdk-lib/aws-servicecatalog'; diff --git a/cdk-lib/capture-stacks/capture-nodes-stack.ts b/cdk-lib/capture-stacks/capture-nodes-stack.ts index aefa8c5..296c54e 100644 --- a/cdk-lib/capture-stacks/capture-nodes-stack.ts +++ b/cdk-lib/capture-stacks/capture-nodes-stack.ts @@ -17,11 +17,12 @@ import * as path from 'path' import { Construct } from 'constructs'; import * as constants from '../core/constants'; -import * as plan from '../core/capacity-plan'; +import * as plan from '../core/context-types'; import {ClusterSsmValue} from '../core/ssm-wrangling'; -import * as user from '../core/user-config'; +import * as types from '../core/context-types'; export interface CaptureNodesStackProps extends cdk.StackProps { + readonly arkimeFilesMap: types.ArkimeFilesMap; readonly captureBucket: s3.Bucket; readonly captureBucketKey: kms.Key; readonly captureVpc: ec2.Vpc; @@ -30,7 +31,7 @@ export interface CaptureNodesStackProps extends cdk.StackProps { readonly osPassword: secretsmanager.Secret; readonly planCluster: plan.ClusterPlan; readonly ssmParamNameCluster: string; - readonly userConfig: user.UserConfig; + readonly userConfig: types.UserConfig; } export class CaptureNodesStack extends cdk.Stack { @@ -141,6 +142,13 @@ export class CaptureNodesStack extends cdk.Stack { resources: [ksmEncryptionKey.keyArn] }), ); + taskDefinition.addToTaskRolePolicy( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ssm:GetParameter'], // Container pulls configuration from Parameter Store + resources: [`arn:aws:ssm:${this.region}:${this.account}:parameter*`] + }), + ); props.osPassword.grantRead(taskDefinition.taskRole); props.captureBucket.grantReadWrite(taskDefinition.taskRole); props.captureBucketKey.grantEncryptDecrypt(taskDefinition.taskRole); @@ -157,6 +165,8 @@ export class CaptureNodesStack extends cdk.Stack { image: ecs.ContainerImage.fromAsset(path.resolve(__dirname, '..', '..', 'docker-capture-node')), logging: new ecs.AwsLogDriver({ streamPrefix: 'CaptureNodes', mode: ecs.AwsLogDriverMode.NON_BLOCKING }), environment: { + 'ARKIME_CONFIG_INI_LOC': props.arkimeFilesMap.captureIniLoc, + 'ARKIME_ADD_FILE_LOCS': JSON.stringify(props.arkimeFilesMap.captureAddFileLocs), 'AWS_REGION': this.region, // Seems not to be defined in this container, strangely 'BUCKET_NAME': props.captureBucket.bucketName, 'CLUSTER_NAME': props.clusterName, diff --git a/cdk-lib/capture-stacks/capture-vpc-stack.ts b/cdk-lib/capture-stacks/capture-vpc-stack.ts index cbf4c54..b03078b 100644 --- a/cdk-lib/capture-stacks/capture-vpc-stack.ts +++ b/cdk-lib/capture-stacks/capture-vpc-stack.ts @@ -4,7 +4,7 @@ import * as ec2 from 'aws-cdk-lib/aws-ec2'; import * as logs from 'aws-cdk-lib/aws-logs'; import { Stack, StackProps } from 'aws-cdk-lib'; -import * as plan from '../core/capacity-plan'; +import * as plan from '../core/context-types'; export interface CaptureVpcStackProps extends StackProps { readonly planCluster: plan.ClusterPlan; diff --git a/cdk-lib/capture-stacks/opensearch-domain-stack.ts 
b/cdk-lib/capture-stacks/opensearch-domain-stack.ts index 23f29c0..b19ae76 100644 --- a/cdk-lib/capture-stacks/opensearch-domain-stack.ts +++ b/cdk-lib/capture-stacks/opensearch-domain-stack.ts @@ -7,7 +7,7 @@ import {Domain, EngineVersion, TLSSecurityPolicy} from 'aws-cdk-lib/aws-opensear import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager'; import * as ssm from 'aws-cdk-lib/aws-ssm'; -import * as plan from '../core/capacity-plan'; +import * as plan from '../core/context-types'; export interface OpenSearchDomainStackProps extends StackProps { diff --git a/cdk-lib/cloud-demo.ts b/cdk-lib/cloud-demo.ts index 0cf227a..6eecc86 100644 --- a/cdk-lib/cloud-demo.ts +++ b/cdk-lib/cloud-demo.ts @@ -44,6 +44,7 @@ switch(params.type) { const captureNodesStack = new CaptureNodesStack(app, params.nameCaptureNodesStack, { env: env, + arkimeFilesMap: params.arkimeFileMap, captureBucket: captureBucketStack.bucket, captureBucketKey: captureBucketStack.bucketKey, captureVpc: captureVpcStack.vpc, @@ -60,6 +61,7 @@ switch(params.type) { const viewerNodesStack = new ViewerNodesStack(app, params.nameViewerNodesStack, { env: env, + arkimeFilesMap: params.arkimeFileMap, arnViewerCert: params.nameViewerCertArn, captureBucket: captureBucketStack.bucket, viewerVpc: captureVpcStack.vpc, diff --git a/cdk-lib/core/command-params.ts b/cdk-lib/core/command-params.ts index 39a128b..dcd53f2 100644 --- a/cdk-lib/core/command-params.ts +++ b/cdk-lib/core/command-params.ts @@ -1,5 +1,4 @@ -import * as plan from './capacity-plan'; -import * as user from './user-config'; +import * as types from './context-types'; /** * Base type for receiving arguments from the Python side of the app. These directly match the interface on the Python @@ -13,6 +12,7 @@ export interface CommandParamsRaw { } */ export interface ClusterMgmtParamsRaw extends CommandParamsRaw { type: 'ClusterMgmtParamsRaw'; + arkimeFileMap: string; nameCluster: string; nameCaptureBucketStack: string; nameCaptureBucketSsmParam: string; @@ -78,6 +78,7 @@ export interface DestroyDemoTrafficParams extends CommandParams { */ export interface ClusterMgmtParams extends CommandParams { type: 'ClusterMgmtParams' + arkimeFileMap: types.ArkimeFilesMap; nameCluster: string; nameCaptureBucketStack: string; nameCaptureBucketSsmParam: string; @@ -91,8 +92,8 @@ export interface ClusterMgmtParams extends CommandParams { nameViewerPassSsmParam: string; nameViewerUserSsmParam: string; nameViewerNodesStack: string; - planCluster: plan.ClusterPlan; - userConfig: user.UserConfig; + planCluster: types.ClusterPlan; + userConfig: types.UserConfig; } /** diff --git a/cdk-lib/core/capacity-plan.ts b/cdk-lib/core/context-types.ts similarity index 75% rename from cdk-lib/core/capacity-plan.ts rename to cdk-lib/core/context-types.ts index 5089697..f3f1fb0 100644 --- a/cdk-lib/core/capacity-plan.ts +++ b/cdk-lib/core/context-types.ts @@ -66,3 +66,24 @@ export interface ClusterPlan { osDomain: OSDomainPlan; s3: S3Plan; } + +/** + * Structure to hold the user's input configuration + */ +export interface UserConfig { + expectedTraffic: number; + spiDays: number; + historyDays: number; + replicas: number; + pcapDays: number; +} + +/** + * Structure to hold the mapping of Arkime files to their path in the data store + */ +export interface ArkimeFilesMap { + captureIniLoc: string; + captureAddFileLocs: string[]; + viewerIniLoc: string; + viewerAddFileLocs: string[]; +} diff --git a/cdk-lib/core/context-wrangling.ts b/cdk-lib/core/context-wrangling.ts index f7734c3..2c58ba8 100644 --- 
a/cdk-lib/core/context-wrangling.ts +++ b/cdk-lib/core/context-wrangling.ts @@ -1,5 +1,4 @@ import * as cdk from 'aws-cdk-lib'; -import * as plan from './capacity-plan'; import * as prms from './command-params'; import {CDK_CONTEXT_CMD_VAR, CDK_CONTEXT_REGION_VAR, CDK_CONTEXT_PARAMS_VAR, ManagementCmd} from './constants'; @@ -86,6 +85,7 @@ function validateArgs(args: ValidateArgs) : (prms.ClusterMgmtParams | prms.Deplo type: 'ClusterMgmtParams', awsAccount: args.awsAccount, awsRegion: args.awsRegion, + arkimeFileMap: JSON.parse(rawClusterMgmtParamsObj.arkimeFileMap), nameCluster: rawClusterMgmtParamsObj.nameCluster, nameCaptureBucketStack: rawClusterMgmtParamsObj.nameCaptureBucketStack, nameCaptureBucketSsmParam: rawClusterMgmtParamsObj.nameCaptureBucketSsmParam, diff --git a/cdk-lib/core/ssm-wrangling.ts b/cdk-lib/core/ssm-wrangling.ts index 5ad0603..4489a43 100644 --- a/cdk-lib/core/ssm-wrangling.ts +++ b/cdk-lib/core/ssm-wrangling.ts @@ -1,4 +1,4 @@ -import * as plan from '../core/capacity-plan'; +import * as plan from './context-types'; import * as user from '../core/user-config'; /** diff --git a/cdk-lib/core/user-config.ts b/cdk-lib/core/user-config.ts deleted file mode 100644 index f49919c..0000000 --- a/cdk-lib/core/user-config.ts +++ /dev/null @@ -1,10 +0,0 @@ -/** - * Structure to hold the user's input configuration - */ -export interface UserConfig { - expectedTraffic: number; - spiDays: number; - historyDays: number; - replicas: number; - pcapDays: number; -} \ No newline at end of file diff --git a/cdk-lib/viewer-stacks/viewer-nodes-stack.ts b/cdk-lib/viewer-stacks/viewer-nodes-stack.ts index 44f8fff..245b5e7 100644 --- a/cdk-lib/viewer-stacks/viewer-nodes-stack.ts +++ b/cdk-lib/viewer-stacks/viewer-nodes-stack.ts @@ -11,8 +11,10 @@ import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager'; import * as ssm from 'aws-cdk-lib/aws-ssm'; import * as path from 'path' import { Construct } from 'constructs'; +import * as types from '../core/context-types'; export interface ViewerNodesStackProps extends cdk.StackProps { + readonly arkimeFilesMap: types.ArkimeFilesMap; readonly arnViewerCert: string; readonly captureBucket: s3.Bucket; readonly viewerVpc: ec2.Vpc; @@ -62,6 +64,13 @@ export class ViewerNodesStack extends cdk.Stack { resources: [ksmEncryptionKey.keyArn] }), ); + taskDefinition.addToTaskRolePolicy( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ssm:GetParameter'], // Container pulls configuration from Parameter Store + resources: [`arn:aws:ssm:${this.region}:${this.account}:parameter*`] + }), + ); props.osPassword.grantRead(taskDefinition.taskRole); props.captureBucket.grantRead(taskDefinition.taskRole); props.osDomain.grantReadWrite(taskDefinition.taskRole); @@ -73,6 +82,8 @@ export class ViewerNodesStack extends cdk.Stack { image: ecs.ContainerImage.fromAsset(path.resolve(__dirname, '..', '..', 'docker-viewer-node')), logging: new ecs.AwsLogDriver({ streamPrefix: 'ViewerNodes', mode: ecs.AwsLogDriverMode.NON_BLOCKING }), environment: { + 'ARKIME_CONFIG_INI_LOC': props.arkimeFilesMap.viewerIniLoc, + 'ARKIME_ADD_FILE_LOCS': JSON.stringify(props.arkimeFilesMap.viewerAddFileLocs), 'AWS_REGION': this.region, // Seems not to be defined in this container, strangely 'BUCKET_NAME': props.captureBucket.bucketName, 'CLUSTER_NAME': props.clusterName, diff --git a/docker-capture-node/Dockerfile b/docker-capture-node/Dockerfile index 1025b6b..fae0bc1 100644 --- a/docker-capture-node/Dockerfile +++ b/docker-capture-node/Dockerfile @@ -4,7 +4,7 @@ ENV 
DEBIAN_FRONTEND noninteractive # Get required and quality-of-life utilities RUN apt-get update && \ - apt-get install -y --no-install-recommends wget curl ca-certificates unzip less mandoc man-db vim libmagic1 iproute2 dnsutils net-tools tcpdump + apt-get install -y --no-install-recommends wget curl ca-certificates unzip less mandoc man-db vim libmagic1 iproute2 dnsutils net-tools tcpdump jq # Install the AWS CLI so we can pull passwords from AWS Secrets Manager at runtime RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ @@ -20,8 +20,5 @@ RUN wget -O arkime.deb https://s3.amazonaws.com/files.molo.ch/arkime-main_ubuntu # Download the rir & oui files RUN /opt/arkime/bin/arkime_update_geo.sh -COPY ./arkime_config.ini /opt/arkime/etc/config.ini -COPY ./default.rules /opt/arkime/etc/default.rules - COPY ./run_capture_node.sh /run_capture_node.sh CMD ["/run_capture_node.sh"] diff --git a/docker-capture-node/run_capture_node.sh b/docker-capture-node/run_capture_node.sh index a9cb7e5..58596b1 100755 --- a/docker-capture-node/run_capture_node.sh +++ b/docker-capture-node/run_capture_node.sh @@ -5,6 +5,8 @@ set -e echo "============================================================" echo "Cluster: $CLUSTER_NAME" echo "Role: Capture Node" +echo "Arkime Config INI Datastore Location: $ARKIME_CONFIG_INI_LOC" +echo "Arkime Additional File Datastore Locations: $ARKIME_ADD_FILE_LOCS" echo "AWS Region: $AWS_REGION" echo "Bucket Name: $BUCKET_NAME" echo "LB Healthcheck Port: $LB_HEALTH_PORT" @@ -13,22 +15,52 @@ echo "OpenSearch Secret Arn: $OPENSEARCH_SECRET_ARN" echo "S3 Storage Class: $S3_STORAGE_CLASS" echo "============================================================" +# Pull our configuration files from the cloud +function write_file_from_datastore() { + datastore_location=$1 + + # Retrieve our file from the cloud and account for wacky escaping + param_val=$(aws ssm get-parameter --name "$datastore_location" --query Parameter.Value) + corrected_string=$(echo "$param_val" | sed 's/\\\"/\"/g' | sed 's/\\\\/\\/g') # Remove extra escaping + corrected_string=$(echo "$corrected_string" | sed 's/^"//' | sed 's/"$//') # Remove starting/ending quotes + + # Pull out the values we need + system_path=$(echo "$corrected_string" | jq -r '.system_path') + echo "System Path: $system_path" >&2 + contents=$(echo "$corrected_string" | jq -r '.contents') + + # Write the file to disk + echo -e "$contents" > "$system_path" + + # Return the path to the calling context + echo "$system_path" +} + +echo "$ARKIME_ADD_FILE_LOCS" | jq -r '.[]' | while IFS= read -r path; do + echo "Processing File in Datastore: $path" + full_file_path=$(write_file_from_datastore "$path") + echo "Written to: $full_file_path" +done + +echo "Processing config.ini in Datastore: $ARKIME_CONFIG_INI_LOC" +config_ini_path=$(write_file_from_datastore "$ARKIME_CONFIG_INI_LOC") +echo "Written to: $config_ini_path" + # Pull configuration from ENV and AWS in order to set up our Arkime install. The ENV variables come from the Fargate # Container definition. We perform some escaping of the our replacement strings for safety. # See: https://stackoverflow.com/questions/407523/escape-a-string-for-a-sed-replace-pattern -echo "Configuring /opt/arkime/etc/config.ini ..." +echo "Configuring $config_ini_path ..." 
ESCAPED_ENDPOINT=$(printf '%s\n' "$OPENSEARCH_ENDPOINT" | sed -e 's/[\/&]/\\&/g') -sed -i'' "s/_ENDPOINT_/$ESCAPED_ENDPOINT/g" /opt/arkime/etc/config.ini +sed -i'' "s/_OS_ENDPOINT_/$ESCAPED_ENDPOINT/g" "$config_ini_path" OPENSEARCH_PASS=$(aws secretsmanager get-secret-value --secret-id $OPENSEARCH_SECRET_ARN --output text --query SecretString) BASE64_AUTH=$(echo -n "admin:$OPENSEARCH_PASS" | base64) -sed -i'' "s/_AUTH_/$BASE64_AUTH/g" /opt/arkime/etc/config.ini +sed -i'' "s/_OS_AUTH_/$BASE64_AUTH/g" "$config_ini_path" -sed -i'' "s/_BUCKET_/$BUCKET_NAME/g" /opt/arkime/etc/config.ini -sed -i'' "s/_HEALTH_PORT_/$LB_HEALTH_PORT/g" /opt/arkime/etc/config.ini -sed -i'' "s/_REGION_/$AWS_REGION/g" /opt/arkime/etc/config.ini -sed -i'' "s/_STORAGE_CLASS_/$S3_STORAGE_CLASS/g" /opt/arkime/etc/config.ini -echo "Successfully configured /opt/arkime/etc/config.ini" +sed -i'' "s/_PCAP_BUCKET_/$BUCKET_NAME/g" "$config_ini_path" +sed -i'' "s/_HEALTH_PORT_/$LB_HEALTH_PORT/g" "$config_ini_path" +sed -i'' "s/_AWS_REGION_/$AWS_REGION/g" "$config_ini_path" +echo "Successfully configured $config_ini_path" echo "Testing connection/creds to OpenSearch domain $OPENSEARCH_ENDPOINT ..." curl -u admin:$OPENSEARCH_PASS -X GET https://$OPENSEARCH_ENDPOINT:443 @@ -46,4 +78,4 @@ chown nobody /opt/arkime/raw # Unneeded when using S3 offload # Start Arkime Capture echo "Running Arkime Capture process ..." -/opt/arkime/bin/capture +/opt/arkime/bin/capture --config "$config_ini_path" diff --git a/docker-viewer-node/Dockerfile b/docker-viewer-node/Dockerfile index 9736043..5d953c8 100644 --- a/docker-viewer-node/Dockerfile +++ b/docker-viewer-node/Dockerfile @@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND noninteractive # Get required and quality-of-life utilities RUN apt-get update && \ - apt-get install -y --no-install-recommends wget curl ca-certificates unzip less mandoc man-db vim libmagic1 iproute2 dnsutils net-tools tcpdump + apt-get install -y --no-install-recommends wget curl ca-certificates unzip less mandoc man-db vim libmagic1 iproute2 dnsutils net-tools tcpdump jq # Install the AWS CLI so we can pull the OpenSearch Domain password from AWS Secrets Manager at runtime RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ @@ -16,7 +16,6 @@ RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2 RUN wget -O arkime.deb https://s3.amazonaws.com/files.molo.ch/arkime-main_ubuntu20_amd64.deb && \ apt-get install -y ./arkime.deb && \ rm -rf ./arkime.deb -COPY ./arkime_config.ini /opt/arkime/etc/config.ini COPY ./run_viewer_node.sh /run_viewer_node.sh CMD ["/run_viewer_node.sh"] diff --git a/docker-viewer-node/run_viewer_node.sh b/docker-viewer-node/run_viewer_node.sh index 6ca467a..bc20d35 100755 --- a/docker-viewer-node/run_viewer_node.sh +++ b/docker-viewer-node/run_viewer_node.sh @@ -5,6 +5,8 @@ set -e echo "============================================================" echo "Cluster: $CLUSTER_NAME" echo "Role: Viewer Node" +echo "Arkime Config INI Datastore Location: $ARKIME_CONFIG_INI_LOC" +echo "Arkime Additional File Datastore Locations: $ARKIME_ADD_FILE_LOCS" echo "AWS Region: $AWS_REGION" echo "Bucket Name: $BUCKET_NAME" echo "OpenSearch Endpoint: $OPENSEARCH_ENDPOINT" @@ -14,21 +16,52 @@ echo "Viewer Password Secret Arn: $VIEWER_PASS_ARN" echo "Viewer User: $VIEWER_USER" echo "============================================================" +# Pull our configuration files from the cloud +function write_file_from_datastore() { + datastore_location=$1 + + # Retrieve our 
file from the cloud and account for wacky escaping + param_val=$(aws ssm get-parameter --name "$datastore_location" --query Parameter.Value) + corrected_string=$(echo "$param_val" | sed 's/\\\"/\"/g' | sed 's/\\\\/\\/g') # Remove extra escaping + corrected_string=$(echo "$corrected_string" | sed 's/^"//' | sed 's/"$//') # Remove starting/ending quotes + + # Pull out the values we need + system_path=$(echo "$corrected_string" | jq -r '.system_path') + echo "System Path: $system_path" >&2 + contents=$(echo "$corrected_string" | jq -r '.contents') + + # Write the file to disk + echo -e "$contents" > "$system_path" + + # Return the path to the calling context + echo "$system_path" +} + +echo "$ARKIME_ADD_FILE_LOCS" | jq -r '.[]' | while IFS= read -r path; do + echo "Processing File in Datastore: $path" + full_file_path=$(write_file_from_datastore "$path") + echo "Written to: $full_file_path" +done + +echo "Processing config.ini in Datastore: $ARKIME_CONFIG_INI_LOC" +config_ini_path=$(write_file_from_datastore "$ARKIME_CONFIG_INI_LOC") +echo "Written to: $config_ini_path" + # Pull configuration from ENV and AWS in order to set up our Arkime install. The ENV variables come from the Fargate # Container definition. We perform some escaping of the our replacement strings for safety. # See: https://stackoverflow.com/questions/407523/escape-a-string-for-a-sed-replace-pattern -echo "Configuring /opt/arkime/etc/config.ini ..." +echo "Configuring $config_ini_path ..." ESCAPED_ENDPOINT=$(printf '%s\n' "$OPENSEARCH_ENDPOINT" | sed -e 's/[\/&]/\\&/g') -sed -i'' "s/_ENDPOINT_/$ESCAPED_ENDPOINT/g" /opt/arkime/etc/config.ini +sed -i'' "s/_OS_ENDPOINT_/$ESCAPED_ENDPOINT/g" "$config_ini_path" OPENSEARCH_PASS=$(aws secretsmanager get-secret-value --secret-id $OPENSEARCH_SECRET_ARN --output text --query SecretString) BASE64_AUTH=$(echo -n "admin:$OPENSEARCH_PASS" | base64) -sed -i'' "s/_AUTH_/$BASE64_AUTH/g" /opt/arkime/etc/config.ini +sed -i'' "s/_OS_AUTH_/$BASE64_AUTH/g" "$config_ini_path" VIEWER_PASS=$(aws secretsmanager get-secret-value --secret-id $VIEWER_PASS_ARN --output text --query SecretString) -sed -i'' "s/_VIEW_PORT_/$VIEWER_PORT/g" /opt/arkime/etc/config.ini -echo "Successfully configured /opt/arkime/etc/config.ini" +sed -i'' "s/_VIEWER_PORT_/$VIEWER_PORT/g" "$config_ini_path" +echo "Successfully configured $config_ini_path" echo "Testing connection/creds to OpenSearch domain $OPENSEARCH_ENDPOINT ..." curl -u admin:$OPENSEARCH_PASS -X GET https://$OPENSEARCH_ENDPOINT:443 @@ -42,4 +75,4 @@ mkdir -p /opt/arkime/raw echo "Running Arkime Viewer process ..." cd /opt/arkime/viewer /opt/arkime/bin/node addUser.js $VIEWER_USER $VIEWER_USER $VIEWER_PASS --admin --packetSearch -/opt/arkime/bin/node viewer.js \ No newline at end of file +/opt/arkime/bin/node viewer.js -c "$config_ini_path" \ No newline at end of file diff --git a/manage_arkime/arkime_interactions/arkime_files.py b/manage_arkime/arkime_interactions/arkime_files.py new file mode 100644 index 0000000..750653a --- /dev/null +++ b/manage_arkime/arkime_interactions/arkime_files.py @@ -0,0 +1,47 @@ +from dataclasses import dataclass +from typing import Dict, List + + +@dataclass +class ArkimeFile: + """ + Class to encapsulate a file to be written to disk on the Arkime Capture/Viewer nodes. 
+ """ + + system_path: str # Absolute path where the file should live on-disk (prefix + filename) + contents: str # The contents of the file + + def __eq__(self, other: object) -> bool: + if not isinstance(other, ArkimeFile): + return False + return self.system_path == other.system_path and self.contents == other.contents + + def to_dict(self) -> Dict[str, str]: + return { + "system_path": self.system_path, + "contents": self.contents + } + +@dataclass +class ArkimeFilesMap: + """ + Class to provide a map to the in-datastore location of files needed on Arkime Capture/Viewer Nodes + """ + captureIniLoc: str # The in-datastore location of Capture Nodes' .INI file for the Capture process + captureAddFileLocs: List[str] # The in-datastore locations of to any additional Capture Node files + viewerIniLoc: str # The Viewer Nodes' .INI file for the Viewer process + viewerAddFileLocs: List[str] # Paths to any additional Viewer Node files + + def __eq__(self, other: object) -> bool: + if not isinstance(other, ArkimeFilesMap): + return False + return (self.captureIniLoc == other.captureIniLoc and self.captureAddFileLocs == other.captureAddFileLocs + and self.viewerIniLoc == other.viewerIniLoc and self.viewerAddFileLocs == other.viewerAddFileLocs) + + def to_dict(self) -> Dict[str, any]: + return { + "captureIniLoc": self.captureIniLoc, + "captureAddFileLocs": self.captureAddFileLocs, + "viewerIniLoc": self.viewerIniLoc, + "viewerAddFileLocs": self.viewerAddFileLocs + } \ No newline at end of file diff --git a/manage_arkime/arkime_interactions/generate_config.py b/manage_arkime/arkime_interactions/generate_config.py new file mode 100644 index 0000000..368a6eb --- /dev/null +++ b/manage_arkime/arkime_interactions/generate_config.py @@ -0,0 +1,116 @@ +from arkime_interactions.arkime_files import ArkimeFile + +# ========== Magic Strings ========== +# Magic key strings we'll search for as targets for a sed find/replace on our Capture/Viewer hosts during their startup +# steps. We only know their real values at CloudFormation deploy-time and embed those values into the containers. 
+AWS_REGION = "_AWS_REGION_" +HEALTH_PORT = "_HEALTH_PORT_" +OS_AUTH = "_OS_AUTH_" +OS_ENDPOINT = "_OS_ENDPOINT_" +PCAP_BUCKET = "_PCAP_BUCKET_" +VIEWER_PORT = "_VIEWER_PORT_" + +# ========== Config File Generation ========== +def get_capture_ini(s3_storage_class: str) -> ArkimeFile: + contents = f""" +[default] +debug=1 +dropUser=nobody +dropGroup=daemon + +elasticsearch=https://{OS_ENDPOINT} +elasticsearchBasicAuth={OS_AUTH} +rotateIndex=daily +logESRequests=true + +tcpHealthCheckPort={HEALTH_PORT} +pluginsDir=/opt/arkime/plugins +plugins=tcphealthcheck.so;writer-s3 + +### PCAP Reading +interface=eth0 +pcapDir=/opt/arkime/raw +snapLen=32768 +pcapReadMethod=afpacketv3 +tpacketv3NumThreads=1 + +### PCAP Writing +pcapWriteMethod=s3 +s3Compression=zstd +s3Region={AWS_REGION} +s3Bucket={PCAP_BUCKET} +s3StorageClass={s3_storage_class} +s3UseECSEnv=true +maxFileTimeM=1 + +### Processing +packetThreads=1 +rulesFiles=/opt/arkime/etc/default.rules +rirFile=/opt/arkime/etc/ipv4-address-space.csv +ouiFile=/opt/arkime/etc/oui.txt +""" + + return ArkimeFile( + "/opt/arkime/etc/config.ini", + contents + ) + +def get_capture_rules_default() -> ArkimeFile: + contents = """ +--- +version: 1 +rules: + - name: "Truncate Encrypted PCAP" + when: "fieldSet" + fields: + protocols: + - tls + - ssh + - quic + ops: + _maxPacketsToSave: 20 + + - name: "Drop syn scan" + when: "beforeFinalSave" + fields: + packets.src: 1 + packets.dst: 0 + tcpflags.syn: 1 + ops: + _dontSaveSPI: 1 +""" + + return ArkimeFile( + "/opt/arkime/etc/default.rules", + contents + ) + +def get_viewer_ini() -> ArkimeFile: + contents = f""" +[default] +debug=1 +dropUser=nobody +dropGroup=daemon + +elasticsearch=https://{OS_ENDPOINT} +elasticsearchBasicAuth={OS_AUTH} +rotateIndex=daily + +passwordSecret=ignore + +cronQueries=auto + +spiDataMaxIndices=7 +pluginsDir=/opt/arkime/plugins +viewerPlugins=writer-s3 +viewPort={VIEWER_PORT} + +### PCAP Config +pcapDir=/opt/arkime/raw +pcapWriteMethod=s3 +""" + + return ArkimeFile( + "/opt/arkime/etc/config.ini", + contents + ) \ No newline at end of file diff --git a/manage_arkime/cdk_interactions/cdk_context.py b/manage_arkime/cdk_interactions/cdk_context.py index 5426f62..841c52b 100644 --- a/manage_arkime/cdk_interactions/cdk_context.py +++ b/manage_arkime/cdk_interactions/cdk_context.py @@ -2,14 +2,16 @@ import shlex from typing import Dict, List +from arkime_interactions.arkime_files import ArkimeFilesMap import constants as constants from core.capacity_planning import (CaptureNodesPlan, CaptureVpcPlan, ClusterPlan, DataNodesPlan, EcsSysResourcePlan, MasterNodesPlan, OSDomainPlan, INSTANCE_TYPE_CAPTURE_NODE, DEFAULT_NUM_AZS, S3Plan, DEFAULT_S3_STORAGE_CLASS) from core.user_config import UserConfig -def generate_create_cluster_context(name: str, viewer_cert_arn: str, cluster_plan: ClusterPlan, user_config: UserConfig) -> Dict[str, str]: - create_context = _generate_cluster_context(name, viewer_cert_arn, cluster_plan, user_config) +def generate_create_cluster_context(name: str, viewer_cert_arn: str, cluster_plan: ClusterPlan, + user_config: UserConfig, file_map: ArkimeFilesMap) -> Dict[str, str]: + create_context = _generate_cluster_context(name, viewer_cert_arn, cluster_plan, user_config, file_map) create_context[constants.CDK_CONTEXT_CMD_VAR] = constants.CMD_CREATE_CLUSTER return create_context @@ -26,13 +28,16 @@ def generate_destroy_cluster_context(name: str) -> Dict[str, str]: S3Plan(DEFAULT_S3_STORAGE_CLASS, 1) ) fake_user_config = UserConfig(1, 1, 1, 1, 1) + fake_map = ArkimeFilesMap("", [], "", 
[]) - destroy_context = _generate_cluster_context(name, fake_arn, fake_cluster_plan, fake_user_config) + destroy_context = _generate_cluster_context(name, fake_arn, fake_cluster_plan, fake_user_config, fake_map) destroy_context[constants.CDK_CONTEXT_CMD_VAR] = constants.CMD_DESTROY_CLUSTER return destroy_context -def _generate_cluster_context(name: str, viewer_cert_arn: str, cluster_plan: ClusterPlan, user_config: UserConfig) -> Dict[str, str]: +def _generate_cluster_context(name: str, viewer_cert_arn: str, cluster_plan: ClusterPlan, user_config: UserConfig, + file_map: ArkimeFilesMap) -> Dict[str, str]: cmd_params = { + "arkimeFileMap": json.dumps(file_map.to_dict()), "nameCluster": name, "nameCaptureBucketStack": constants.get_capture_bucket_stack_name(name), "nameCaptureBucketSsmParam": constants.get_capture_bucket_ssm_param_name(name), diff --git a/manage_arkime/commands/create_cluster.py b/manage_arkime/commands/create_cluster.py index cee15ff..9be92b2 100644 --- a/manage_arkime/commands/create_cluster.py +++ b/manage_arkime/commands/create_cluster.py @@ -1,5 +1,8 @@ +import json import logging +import arkime_interactions.arkime_files as arkime_files +import arkime_interactions.generate_config as arkime_conf from aws_interactions.acm_interactions import upload_default_elb_cert from aws_interactions.aws_client_provider import AwsClientProvider import aws_interactions.events_interactions as events @@ -21,6 +24,7 @@ def cmd_create_cluster(profile: str, region: str, name: str, expected_traffic: f aws_provider = AwsClientProvider(aws_profile=profile, aws_region=region) + # Generate our capacity plan and confirm it's what the user expected previous_user_config = _get_previous_user_config(name, aws_provider) next_user_config = _get_next_user_config(name, expected_traffic, spi_days, history_days, replicas, pcap_days, aws_provider) previous_capacity_plan = _get_previous_capacity_plan(name, aws_provider) @@ -30,8 +34,13 @@ def cmd_create_cluster(profile: str, region: str, name: str, expected_traffic: f logger.info("Aborting per user response") return + # Set up the cert the Viewers use for HTTPS cert_arn = _set_up_viewer_cert(name, aws_provider) + # Set up any additional state + file_map = _write_arkime_config_to_datastore(name, next_capacity_plan, aws_provider) + + # Deploy the CFN Resources cdk_client = CdkClient() stacks_to_deploy = [ constants.get_capture_bucket_stack_name(name), @@ -40,9 +49,10 @@ def cmd_create_cluster(profile: str, region: str, name: str, expected_traffic: f constants.get_opensearch_domain_stack_name(name), constants.get_viewer_nodes_stack_name(name) ] - create_context = context.generate_create_cluster_context(name, cert_arn, next_capacity_plan, next_user_config) + create_context = context.generate_create_cluster_context(name, cert_arn, next_capacity_plan, next_user_config, file_map) cdk_client.deploy(stacks_to_deploy, aws_profile=profile, aws_region=region, context=create_context) + # Kick off Events to ensure that ISM is set up on the CFN-created OpenSearch Domain _configure_ism(name, next_user_config.historyDays, next_user_config.spiDays, next_user_config.replicas, aws_provider) def _get_previous_user_config(cluster_name: str, aws_provider: AwsClientProvider) -> UserConfig: @@ -131,6 +141,53 @@ def _confirm_usage(prev_capacity_plan: ClusterPlan, next_capacity_plan: ClusterP return True return report.get_confirmation() +def _write_arkime_config_to_datastore(cluster_name: str, next_capacity_plan: ClusterPlan, + aws_provider: AwsClientProvider) -> 
arkime_files.ArkimeFilesMap:
+    # Initialize our map
+    map = arkime_files.ArkimeFilesMap(
+        constants.get_capture_config_ini_ssm_param_name(cluster_name),
+        [],
+        constants.get_viewer_config_ini_ssm_param_name(cluster_name),
+        [],
+    )
+
+    # Write the Arkime INI files
+    capture_ini = arkime_conf.get_capture_ini(next_capacity_plan.s3.pcapStorageClass)
+    ssm_ops.put_ssm_param(
+        map.captureIniLoc,
+        json.dumps(capture_ini.to_dict()),
+        aws_provider,
+        description="Contents of the Capture Nodes' .ini file",
+        overwrite=True
+    )
+
+    viewer_ini = arkime_conf.get_viewer_ini()
+    ssm_ops.put_ssm_param(
+        map.viewerIniLoc,
+        json.dumps(viewer_ini.to_dict()),
+        aws_provider,
+        description="Contents of the Viewer Nodes' .ini file",
+        overwrite=True
+    )
+
+    # Write any/all additional Capture Node files
+    capture_additional_files = [
+        arkime_conf.get_capture_rules_default()
+    ]
+    for capture_file in capture_additional_files:
+        new_path = constants.get_capture_file_ssm_param_name(cluster_name, capture_file.system_path)
+        ssm_ops.put_ssm_param(
+            new_path,
+            json.dumps(capture_file.to_dict()),
+            aws_provider,
+            description="A Capture Node file",
+            overwrite=True
+        )
+
+        map.captureAddFileLocs.append(new_path)
+
+    return map
+
 def _set_up_viewer_cert(name: str, aws_provider: AwsClientProvider) -> str:
     # Only set up the certificate if it doesn't exist
     cert_ssm_param = constants.get_viewer_cert_ssm_param_name(name)
diff --git a/manage_arkime/commands/destroy_cluster.py b/manage_arkime/commands/destroy_cluster.py
index 70499ae..2296c99 100644
--- a/manage_arkime/commands/destroy_cluster.py
+++ b/manage_arkime/commands/destroy_cluster.py
@@ -1,5 +1,6 @@
 import logging
 
+import arkime_interactions.generate_config as arkime_conf
 from aws_interactions.acm_interactions import destroy_cert
 from aws_interactions.aws_client_provider import AwsClientProvider
 from aws_interactions.destroy_os_domain import destroy_os_domain_and_wait
@@ -59,6 +60,9 @@ def cmd_destroy_cluster(profile: str, region: str, name: str, destroy_everything
     # Destroy our cert
     _destroy_viewer_cert(name, aws_provider)
 
+    # Destroy any additional remaining state
+    _delete_arkime_config_from_datastore(name, aws_provider)
+
 def _destroy_viewer_cert(cluster_name: str, aws_provider: AwsClientProvider):
     # Only destroy up the certificate if it exists
     cert_ssm_param = constants.get_viewer_cert_ssm_param_name(cluster_name)
@@ -71,4 +75,26 @@ def _destroy_viewer_cert(cluster_name: str, aws_provider: AwsClientProvider):
     # Destroy the cert and state
     logger.debug("Destroying certificate and SSM parameter...")
     destroy_cert(cert_arn, aws_provider) # destroy first so if op fails we still know the ARN
-    delete_ssm_param(cert_ssm_param, aws_provider)
\ No newline at end of file
+    delete_ssm_param(cert_ssm_param, aws_provider)
+
+def _delete_arkime_config_from_datastore(cluster_name: str, aws_provider: AwsClientProvider):
+    # Delete the Arkime INI files
+    delete_ssm_param(
+        constants.get_capture_config_ini_ssm_param_name(cluster_name),
+        aws_provider
+    )
+
+    delete_ssm_param(
+        constants.get_viewer_config_ini_ssm_param_name(cluster_name),
+        aws_provider
+    )
+
+    # Delete any/all additional Capture Node files
+    capture_additional_files = [
+        arkime_conf.get_capture_rules_default()
+    ]
+    for capture_file in capture_additional_files:
+        delete_ssm_param(
+            constants.get_capture_file_ssm_param_name(cluster_name, capture_file.system_path),
+            aws_provider
+        )
\ No newline at end of file
diff --git a/manage_arkime/constants.py b/manage_arkime/constants.py
index 7cde40d..be09e97
100644 --- a/manage_arkime/constants.py +++ b/manage_arkime/constants.py @@ -48,6 +48,12 @@ def get_capture_bucket_stack_name(cluster_name: str) -> str: def get_capture_bucket_ssm_param_name(cluster_name: str) -> str: return f"{SSM_CLUSTERS_PREFIX}/{cluster_name}/capture-bucket-name" +def get_capture_config_ini_ssm_param_name(cluster_name: str) -> str: + return f"{SSM_CLUSTERS_PREFIX}/{cluster_name}/capture-ini" + +def get_capture_file_ssm_param_name(cluster_name: str, system_path: str) -> str: + return f"{SSM_CLUSTERS_PREFIX}/{cluster_name}/capture-files{system_path}" + def get_capture_nodes_stack_name(cluster_name: str) -> str: return f"{cluster_name}-CaptureNodes" @@ -69,6 +75,9 @@ def get_subnet_ssm_param_name(cluster_name: str, vpc_id: str, subnet_id: str) -> def get_viewer_cert_ssm_param_name(cluster_name: str) -> str: return f"/arkime/clusters/{cluster_name}/viewer-cert" +def get_viewer_config_ini_ssm_param_name(cluster_name: str) -> str: + return f"{SSM_CLUSTERS_PREFIX}/{cluster_name}/viewer-ini" + def get_viewer_dns_ssm_param_name(cluster_name: str) -> str: return f"/arkime/clusters/{cluster_name}/viewer-dns" diff --git a/test_manage_arkime/commands/test_create_cluster.py b/test_manage_arkime/commands/test_create_cluster.py index 3f5753c..9734d40 100644 --- a/test_manage_arkime/commands/test_create_cluster.py +++ b/test_manage_arkime/commands/test_create_cluster.py @@ -3,11 +3,13 @@ import shlex import unittest.mock as mock +import arkime_interactions.arkime_files as arkime_files +import arkime_interactions.generate_config as arkime_conf from aws_interactions.events_interactions import ConfigureIsmEvent import aws_interactions.ssm_operations as ssm_ops from commands.create_cluster import (cmd_create_cluster, _set_up_viewer_cert, _get_next_capacity_plan, _get_next_user_config, _confirm_usage, - _get_previous_capacity_plan, _get_previous_user_config, _configure_ism) + _get_previous_capacity_plan, _get_previous_user_config, _configure_ism, _write_arkime_config_to_datastore) import constants as constants from core.capacity_planning import (CaptureNodesPlan, EcsSysResourcePlan, MINIMUM_TRAFFIC, OSDomainPlan, DataNodesPlan, MasterNodesPlan, CaptureVpcPlan, ClusterPlan, DEFAULT_SPI_DAYS, DEFAULT_REPLICAS, DEFAULT_NUM_AZS, S3Plan, @@ -15,6 +17,7 @@ from core.user_config import UserConfig @mock.patch("commands.create_cluster.AwsClientProvider", mock.Mock()) +@mock.patch("commands.create_cluster._write_arkime_config_to_datastore") @mock.patch("commands.create_cluster._configure_ism") @mock.patch("commands.create_cluster._get_previous_user_config") @mock.patch("commands.create_cluster._get_previous_capacity_plan") @@ -24,7 +27,8 @@ @mock.patch("commands.create_cluster._set_up_viewer_cert") @mock.patch("commands.create_cluster.CdkClient") def test_WHEN_cmd_create_cluster_called_THEN_cdk_command_correct(mock_cdk_client_cls, mock_set_up, mock_get_plans, mock_get_config, - mock_confirm, mock_get_prev_plan, mock_get_prev_config, mock_configure): + mock_confirm, mock_get_prev_plan, mock_get_prev_config, mock_configure, + mock_write_arkime): # Set up our mock mock_set_up.return_value = "arn" @@ -45,6 +49,9 @@ def test_WHEN_cmd_create_cluster_called_THEN_cdk_command_correct(mock_cdk_client mock_confirm.return_value = True + map = arkime_files.ArkimeFilesMap("cap", ["f1"], "view", ["f2", "f3"]) + mock_write_arkime.return_value = map + # Run our test cmd_create_cluster("profile", "region", "my-cluster", None, None, None, None, None, True) @@ -63,6 +70,7 @@ def 
test_WHEN_cmd_create_cluster_called_THEN_cdk_command_correct(mock_cdk_client context={ constants.CDK_CONTEXT_CMD_VAR: constants.CMD_CREATE_CLUSTER, constants.CDK_CONTEXT_PARAMS_VAR: shlex.quote(json.dumps({ + "arkimeFileMap": json.dumps(map.to_dict()), "nameCluster": "my-cluster", "nameCaptureBucketStack": constants.get_capture_bucket_stack_name("my-cluster"), "nameCaptureBucketSsmParam": constants.get_capture_bucket_ssm_param_name("my-cluster"), @@ -94,7 +102,13 @@ def test_WHEN_cmd_create_cluster_called_THEN_cdk_command_correct(mock_cdk_client ] assert expected_configure_calls == mock_configure.call_args_list + expected_write_arkime_calls = [ + mock.call("my-cluster", cluster_plan, mock.ANY) + ] + assert expected_write_arkime_calls == mock_write_arkime.call_args_list + @mock.patch("commands.create_cluster.AwsClientProvider", mock.Mock()) +@mock.patch("commands.create_cluster._write_arkime_config_to_datastore") @mock.patch("commands.create_cluster._configure_ism") @mock.patch("commands.create_cluster._get_previous_user_config") @mock.patch("commands.create_cluster._get_previous_capacity_plan") @@ -104,7 +118,8 @@ def test_WHEN_cmd_create_cluster_called_THEN_cdk_command_correct(mock_cdk_client @mock.patch("commands.create_cluster._set_up_viewer_cert") @mock.patch("commands.create_cluster.CdkClient") def test_WHEN_cmd_create_cluster_called_AND_abort_usage_THEN_as_expected(mock_cdk_client_cls, mock_set_up, mock_get_plans, mock_get_config, - mock_confirm, mock_get_prev_plan, mock_get_prev_config, mock_configure): + mock_confirm, mock_get_prev_plan, mock_get_prev_config, mock_configure, + mock_write_arkime): # Set up our mock mock_set_up.return_value = "arn" @@ -138,6 +153,9 @@ def test_WHEN_cmd_create_cluster_called_AND_abort_usage_THEN_as_expected(mock_cd expected_configure_calls = [] assert expected_configure_calls == mock_configure.call_args_list + expected_write_arkime_calls = [] + assert expected_write_arkime_calls == mock_write_arkime.call_args_list + @mock.patch("commands.create_cluster.ssm_ops") def test_WHEN_get_previous_user_config_called_AND_exists_THEN_as_expected(mock_ssm_ops): # Set up our mock @@ -419,6 +437,58 @@ def test_WHEN_confirm_usage_called_THEN_as_expected(mock_report_cls): assert False == actual_value assert mock_report.get_confirmation.called +@mock.patch("commands.create_cluster.ssm_ops") +def test_WHEN_write_arkime_config_to_datastore_called_THEN_as_expected(mock_ssm_ops): + # Set up our mock + cluster_plan = ClusterPlan( + CaptureNodesPlan("m5.xlarge", 20, 25, 1), + CaptureVpcPlan(DEFAULT_NUM_AZS), + EcsSysResourcePlan(3584, 15360), + OSDomainPlan(DataNodesPlan(2, "t3.small.search", 100), MasterNodesPlan(3, "m6g.large.search")), + S3Plan(DEFAULT_S3_STORAGE_CLASS, DEFAULT_S3_STORAGE_DAYS) + ) + + mock_provider = mock.Mock() + + # Run our test + actual_value = _write_arkime_config_to_datastore("my-cluster", cluster_plan, mock_provider) + + # Check our results + expected_map = arkime_files.ArkimeFilesMap( + constants.get_capture_config_ini_ssm_param_name("my-cluster"), + [ + constants.get_capture_file_ssm_param_name("my-cluster", arkime_conf.get_capture_rules_default().system_path) + ], + constants.get_viewer_config_ini_ssm_param_name("my-cluster"), + [], + ) + assert expected_map == actual_value + + expected_put_ssm_calls = [ + mock.call( + constants.get_capture_config_ini_ssm_param_name("my-cluster"), + mock.ANY, + mock_provider, + description=mock.ANY, + overwrite=True + ), + mock.call( + constants.get_viewer_config_ini_ssm_param_name("my-cluster"), + mock.ANY, + 
mock_provider, + description=mock.ANY, + overwrite=True + ), + mock.call( + constants.get_capture_file_ssm_param_name("my-cluster", arkime_conf.get_capture_rules_default().system_path), + mock.ANY, + mock_provider, + description=mock.ANY, + overwrite=True + ), + ] + assert expected_put_ssm_calls == mock_ssm_ops.put_ssm_param.call_args_list + @mock.patch("commands.create_cluster.upload_default_elb_cert") @mock.patch("commands.create_cluster.ssm_ops") def test_WHEN_set_up_viewer_cert_called_THEN_set_up_correctly(mock_ssm_ops, mock_upload): diff --git a/test_manage_arkime/commands/test_destroy_cluster.py b/test_manage_arkime/commands/test_destroy_cluster.py index b9eca6d..a0f74f2 100644 --- a/test_manage_arkime/commands/test_destroy_cluster.py +++ b/test_manage_arkime/commands/test_destroy_cluster.py @@ -2,8 +2,10 @@ import shlex import unittest.mock as mock +import arkime_interactions.arkime_files as arkime_files +import arkime_interactions.generate_config as arkime_conf from aws_interactions.ssm_operations import ParamDoesNotExist -from commands.destroy_cluster import cmd_destroy_cluster, _destroy_viewer_cert +from commands.destroy_cluster import cmd_destroy_cluster, _destroy_viewer_cert, _delete_arkime_config_from_datastore import constants as constants from core.capacity_planning import (CaptureNodesPlan, EcsSysResourcePlan, OSDomainPlan, DataNodesPlan, MasterNodesPlan, ClusterPlan, CaptureVpcPlan, S3Plan, DEFAULT_S3_STORAGE_CLASS) @@ -11,12 +13,15 @@ TEST_CLUSTER = "my-cluster" +@mock.patch("commands.destroy_cluster._delete_arkime_config_from_datastore") @mock.patch("commands.destroy_cluster._destroy_viewer_cert") @mock.patch("commands.destroy_cluster.get_ssm_names_by_path") @mock.patch("commands.destroy_cluster.destroy_os_domain_and_wait") @mock.patch("commands.destroy_cluster.destroy_s3_bucket") @mock.patch("commands.destroy_cluster.CdkClient") -def test_WHEN_cmd_destroy_cluster_called_AND_dont_destroy_everything_THEN_expected_cmds(mock_cdk_client_cls, mock_destroy_bucket, mock_destroy_domain, mock_ssm_get, mock_destroy_cert): +def test_WHEN_cmd_destroy_cluster_called_AND_dont_destroy_everything_THEN_expected_cmds(mock_cdk_client_cls, mock_destroy_bucket, + mock_destroy_domain, mock_ssm_get, mock_destroy_cert, + mock_delete_arkime): # Set up our mock mock_ssm_get.return_value = [] @@ -49,6 +54,7 @@ def test_WHEN_cmd_destroy_cluster_called_AND_dont_destroy_everything_THEN_expect context={ constants.CDK_CONTEXT_CMD_VAR: constants.CMD_DESTROY_CLUSTER, constants.CDK_CONTEXT_PARAMS_VAR: shlex.quote(json.dumps({ + "arkimeFileMap": json.dumps(arkime_files.ArkimeFilesMap("", [], "", []).to_dict()), "nameCluster": TEST_CLUSTER, "nameCaptureBucketStack": constants.get_capture_bucket_stack_name(TEST_CLUSTER), "nameCaptureBucketSsmParam": constants.get_capture_bucket_ssm_param_name(TEST_CLUSTER), @@ -73,16 +79,24 @@ def test_WHEN_cmd_destroy_cluster_called_AND_dont_destroy_everything_THEN_expect expected_destroy_calls = [ mock.call(TEST_CLUSTER, mock.ANY) ] - assert expected_destroy_calls == mock_destroy_cert.call_args_list + assert expected_destroy_calls == mock_destroy_cert.call_args_list + + expected_delete_arkime_calls = [ + mock.call(TEST_CLUSTER, mock.ANY) + ] + assert expected_delete_arkime_calls == mock_delete_arkime.call_args_list @mock.patch("commands.destroy_cluster.AwsClientProvider", mock.Mock()) +@mock.patch("commands.destroy_cluster._delete_arkime_config_from_datastore") @mock.patch("commands.destroy_cluster._destroy_viewer_cert") 
@mock.patch("commands.destroy_cluster.get_ssm_names_by_path") @mock.patch("commands.destroy_cluster.destroy_os_domain_and_wait") @mock.patch("commands.destroy_cluster.destroy_s3_bucket") @mock.patch("commands.destroy_cluster.get_ssm_param_value") @mock.patch("commands.destroy_cluster.CdkClient") -def test_WHEN_cmd_destroy_cluster_called_AND_destroy_everything_THEN_expected_cmds(mock_cdk_client_cls, mock_get_ssm, mock_destroy_bucket, mock_destroy_domain, mock_ssm_names, mock_destroy_cert): +def test_WHEN_cmd_destroy_cluster_called_AND_destroy_everything_THEN_expected_cmds(mock_cdk_client_cls, mock_get_ssm, mock_destroy_bucket, + mock_destroy_domain, mock_ssm_names, mock_destroy_cert, + mock_delete_arkime): # Set up our mock mock_ssm_names.return_value = [] @@ -137,6 +151,7 @@ def test_WHEN_cmd_destroy_cluster_called_AND_destroy_everything_THEN_expected_cm context={ constants.CDK_CONTEXT_CMD_VAR: constants.CMD_DESTROY_CLUSTER, constants.CDK_CONTEXT_PARAMS_VAR: shlex.quote(json.dumps({ + "arkimeFileMap": json.dumps(arkime_files.ArkimeFilesMap("", [], "", []).to_dict()), "nameCluster": TEST_CLUSTER, "nameCaptureBucketStack": constants.get_capture_bucket_stack_name(TEST_CLUSTER), "nameCaptureBucketSsmParam": constants.get_capture_bucket_ssm_param_name(TEST_CLUSTER), @@ -161,7 +176,12 @@ def test_WHEN_cmd_destroy_cluster_called_AND_destroy_everything_THEN_expected_cm expected_destroy_calls = [ mock.call(TEST_CLUSTER, mock.ANY) ] - assert expected_destroy_calls == mock_destroy_cert.call_args_list + assert expected_destroy_calls == mock_destroy_cert.call_args_list + + expected_delete_arkime_calls = [ + mock.call(TEST_CLUSTER, mock.ANY) + ] + assert expected_delete_arkime_calls == mock_delete_arkime.call_args_list @mock.patch("commands.destroy_cluster.get_ssm_names_by_path") @mock.patch("commands.destroy_cluster.destroy_os_domain_and_wait") @@ -241,4 +261,29 @@ def test_WHEN_destroy_viewer_cert_called_AND_doesnt_exist_THEN_skip(mock_ssm_get assert expected_destroy_cert_calls == mock_destroy_cert.call_args_list expected_delete_ssm_calls = [] + assert expected_delete_ssm_calls == mock_ssm_delete.call_args_list + +@mock.patch("commands.destroy_cluster.delete_ssm_param") +def test_WHEN_delete_arkime_config_from_datastore_called_THEN_as_expected(mock_ssm_delete): + # Set up our mock + mock_provider = mock.Mock() + + # Run our test + _delete_arkime_config_from_datastore(TEST_CLUSTER, mock_provider) + + # Check our results + expected_delete_ssm_calls = [ + mock.call( + constants.get_capture_config_ini_ssm_param_name(TEST_CLUSTER), + mock_provider + ), + mock.call( + constants.get_viewer_config_ini_ssm_param_name(TEST_CLUSTER), + mock_provider + ), + mock.call( + constants.get_capture_file_ssm_param_name(TEST_CLUSTER, arkime_conf.get_capture_rules_default().system_path), + mock_provider + ), + ] assert expected_delete_ssm_calls == mock_ssm_delete.call_args_list \ No newline at end of file