From bf9b10164bd72a1b16ebac7c8f09f5ee2ba5b81d Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 09:53:25 -0400 Subject: [PATCH 01/42] Add SQS/Lamda/Fargate Scan execution - Create the SQS queue in terraform - In Serverless Framework, define new Lambda function that will trigger fargate from message command --- backend/env.yml | 2 + backend/serverless.yml | 9 ++++ backend/src/tasks/functions.yml | 10 ++++ backend/src/tasks/scanExecution.ts | 75 ++++++++++++++++++++++++++++++ infrastructure/prod.tfvars | 1 + infrastructure/sqs.tf | 14 ++++++ infrastructure/stage.tfvars | 1 + infrastructure/vars.tf | 7 +++ 8 files changed, 119 insertions(+) create mode 100644 backend/src/tasks/scanExecution.ts create mode 100644 infrastructure/sqs.tf diff --git a/backend/env.yml b/backend/env.yml index 301404984..d0d2df1f5 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -41,6 +41,7 @@ staging: EXPORT_BUCKET_NAME: cisa-crossfeed-staging-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports + SQS_QUEUE_NAME: crossfeed-staging-worker-queue prod: DB_DIALECT: 'postgres' @@ -76,6 +77,7 @@ prod: EXPORT_BUCKET_NAME: cisa-crossfeed-prod-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports + SQS_QUEUE_NAME: crossfeed-prod-worker-queue dev-vpc: securityGroupIds: diff --git a/backend/serverless.yml b/backend/serverless.yml index 825aafd00..09005180e 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -57,6 +57,15 @@ provider: - s3:PutObject - s3:PutObjectAcl Resource: '*' + - Effect: Allow + Action: + - sts:AssumeRole + Resource: '*' + - Effect: Allow + Action: + - sqs:ReceiveMessage + - sqs:SendMessage + Resource: '*' functions: - ${file(./src/tasks/functions.yml)} diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index a7da691b5..20bd0f1c8 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -21,6 +21,16 @@ bastion: makeGlobalAdmin: handler: src/tasks/makeGlobalAdmin.handler +scanExecution: + handler: src/tasks/scanExecution.handler + events: + - sqs: + arn: + Fn::GetAtt: + - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_NAME, ''} + - Arn + batchSize: 5 # Number of messages the lambda can continue to process while a fargate is still running + updateScanTaskStatus: handler: src/tasks/updateScanTaskStatus.handler events: diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts new file mode 100644 index 000000000..ded5194b9 --- /dev/null +++ b/backend/src/tasks/scanExecution.ts @@ -0,0 +1,75 @@ +import { SQSEvent, SQSRecord } from 'aws-lambda'; +import * as AWS from 'aws-sdk'; + +const ecs = new AWS.ECS(); +const sqs = new AWS.SQS(); + +export const invokeFargateTask = async (event: SQSEvent): Promise => { + try { + const sqsRecord: SQSRecord = event.Records[0]; + const commandOptions: string = sqsRecord.body; + + // Get the ARN of the SQS queue from the event + const sqsQueueArn: string | undefined = sqsRecord.eventSourceARN; + + if (!sqsQueueArn) { + throw new Error('SQS Queue ARN not found in event'); + } + + // Describe the SQS queue to get its URL + const sqsQueue = { + QueueUrl: sqsQueueArn // Use the ARN as the QueueUrl + }; + const queueAttributesResponse = await sqs + .getQueueAttributes(sqsQueue) + .promise(); + const sqsQueueUrl = queueAttributesResponse.Attributes?.QueueUrl; + + if (!sqsQueueUrl) { + throw new Error('SQS Queue URL not found'); + } + + const params: 
AWS.ECS.RunTaskRequest = { + cluster: process.env.FARGATE_CLUSTER_NAME!, + taskDefinition: process.env.FARGATE_TASK_DEFINITION_NAME!, + launchType: 'FARGATE', + networkConfiguration: { + awsvpcConfiguration: { + assignPublicIp: 'ENABLED', + securityGroups: [process.env.FARGATE_SG_ID!], + subnets: [process.env.FARGATE_SUBNET_ID!] + } + }, + platformVersion: '1.4.0', + overrides: { + containerOverrides: [ + { + name: 'main', // from task definition + command: [commandOptions] // Pass the command options as an array + } + ] + } + }; + + const data = await ecs.runTask(params).promise(); + console.log('Fargate task started:', data); + + // Send a message to the SQS queue to trigger processing + const sqsParams: AWS.SQS.SendMessageRequest = { + MessageBody: 'Start processing...', + QueueUrl: sqsQueueUrl + }; + await sqs.sendMessage(sqsParams).promise(); + + return { + statusCode: 200, + body: JSON.stringify('Fargate task started and message sent to SQS queue') + }; + } catch (error) { + console.error('Error starting Fargate task:', error); + return { + statusCode: 500, + body: JSON.stringify('Error starting Fargate task') + }; + } +}; diff --git a/infrastructure/prod.tfvars b/infrastructure/prod.tfvars index 84a565b77..dff1c1233 100644 --- a/infrastructure/prod.tfvars +++ b/infrastructure/prod.tfvars @@ -67,3 +67,4 @@ create_db_accessor_instance = true db_accessor_instance_class = "t3.2xlarge" create_elk_instance = false elk_instance_class = "t3.2xlarge" +sqs_queue_name = "crossfeed-prod-worker-queue" diff --git a/infrastructure/sqs.tf b/infrastructure/sqs.tf new file mode 100644 index 000000000..2a62f8449 --- /dev/null +++ b/infrastructure/sqs.tf @@ -0,0 +1,14 @@ + +# SQS Queue +resource "aws_sqs_queue" "terraform_queue" { + name = var.sqs_queue_name + delay_seconds = 90 + max_message_size = 262144 + message_retention_seconds = 345600 # 4 days + receive_wait_time_seconds = 10 + + tags = { + Project = var.project + Stage = var.stage + } +} \ No newline at end of file diff --git a/infrastructure/stage.tfvars b/infrastructure/stage.tfvars index 9312408f2..f80fe20d5 100644 --- a/infrastructure/stage.tfvars +++ b/infrastructure/stage.tfvars @@ -67,3 +67,4 @@ create_db_accessor_instance = true db_accessor_instance_class = "t3.2xlarge" create_elk_instance = true elk_instance_class = "t3.2xlarge" +sqs_queue_name = "crossfeed-staging-worker-queue" diff --git a/infrastructure/vars.tf b/infrastructure/vars.tf index c1e49cf6e..de9a093c6 100644 --- a/infrastructure/vars.tf +++ b/infrastructure/vars.tf @@ -411,3 +411,10 @@ variable "create_elk_instance" { type = bool default = false } + +variable "sqs_queue_name" { + description = "sqs_queue_name" + type = string + default = "crossfeed-staging-worker-queue" +} + From 36fc6a851b8a877336dfc44ae3d1fb5bfac44dcb Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 10:52:01 -0400 Subject: [PATCH 02/42] Add policy allowing Accessor EC2 to send SQS messages --- infrastructure/database.tf | 74 ++++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 23 deletions(-) diff --git a/infrastructure/database.tf b/infrastructure/database.tf index 4c25fe6f3..cb3ab1be1 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -75,6 +75,33 @@ data "aws_ami" "ubuntu" { owners = ["099720109477"] } +# DB Accessor EC2 +resource "aws_instance" "db_accessor" { + count = var.create_db_accessor_instance ? 
1 : 0 + ami = data.aws_ami.ubuntu.id + instance_type = var.db_accessor_instance_class + associate_public_ip_address = false + + tags = { + Project = var.project + Stage = var.stage + } + root_block_device { + volume_size = 1000 + } + + vpc_security_group_ids = [aws_security_group.allow_internal.id] + subnet_id = aws_subnet.backend.id + + iam_instance_profile = aws_iam_instance_profile.db_accessor.id + user_data = file("./ssm-agent-install.sh") + + lifecycle { + # prevent_destroy = true + ignore_changes = [ami] + } +} + resource "aws_iam_role" "db_accessor" { name = "crossfeed-db-accessor-${var.stage}" assume_role_policy = < Date: Wed, 4 Oct 2023 10:54:48 -0400 Subject: [PATCH 03/42] Add additional permissions for EC2 to interact with SQS "sqs:SendMessage", "sqs:ReceiveMessage", "sqs:DeleteMessage", "sqs:GetQueueAttributes", "sqs:ListQueues", "sqs:GetQueueUrl" --- infrastructure/database.tf | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/infrastructure/database.tf b/infrastructure/database.tf index cb3ab1be1..bda061d8f 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -172,7 +172,12 @@ resource "aws_iam_policy" "sqs_send_message_policy" { Statement = [ { Action = [ - "sqs:SendMessage" + "sqs:SendMessage", + "sqs:ReceiveMessage", + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:ListQueues", + "sqs:GetQueueUrl" ], Effect = "Allow", Resource = aws_sqs_queue.terraform_queue.arn From 51715daed4c921297a5f29ac2a8823c48dadb135 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 11:05:13 -0400 Subject: [PATCH 04/42] Add 10 minute timeout to the lambda function --- backend/src/tasks/functions.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index f2093fef0..8d6b54209 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -31,6 +31,7 @@ makeGlobalAdmin: scanExecution: handler: src/tasks/scanExecution.handler + timeout: 600 # 10 minutes events: - sqs: arn: From f0dfdf959b2324967173ded7e4337fda9fdb65cb Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 11:18:15 -0400 Subject: [PATCH 05/42] remove extra lines in database.tf --- infrastructure/database.tf | 2 -- 1 file changed, 2 deletions(-) diff --git a/infrastructure/database.tf b/infrastructure/database.tf index bda061d8f..bbdf78339 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -237,7 +237,6 @@ resource "aws_ssm_parameter" "worker_subnet_id" { } } - resource "aws_ssm_parameter" "crossfeed_send_db_host" { name = var.ssm_db_host type = "SecureString" @@ -296,7 +295,6 @@ resource "aws_s3_bucket_logging" "reports_bucket" { target_prefix = "reports_bucket/" } - # P&E DB Backups S3 bucket resource "aws_s3_bucket" "pe_db_backups_bucket" { bucket = var.pe_db_backups_bucket_name From 3dc1f7dfcf53cde785776f9a11cab32878f90c1a Mon Sep 17 00:00:00 2001 From: aloftus23 <79927030+aloftus23@users.noreply.github.com> Date: Wed, 4 Oct 2023 11:31:21 -0400 Subject: [PATCH 06/42] SQS POC: Add queue and lambda function to trigger scans in fargate (#2278) * Add SQS/Lamda/Fargate Scan execution - Create the SQS queue in terraform - In Serverless Framework, define new Lambda function that will trigger fargate from message command --- backend/env.yml | 2 + backend/serverless.yml | 9 ++++ backend/src/tasks/functions.yml | 11 +++++ backend/src/tasks/scanExecution.ts | 75 ++++++++++++++++++++++++++++ infrastructure/database.tf | 79 +++++++++++++++++++++--------- 
infrastructure/prod.tfvars | 1 + infrastructure/sqs.tf | 14 ++++++ infrastructure/stage.tfvars | 1 + infrastructure/vars.tf | 7 +++ 9 files changed, 175 insertions(+), 24 deletions(-) create mode 100644 backend/src/tasks/scanExecution.ts create mode 100644 infrastructure/sqs.tf diff --git a/backend/env.yml b/backend/env.yml index 4a922725a..ac16d4ddc 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -41,6 +41,7 @@ staging: EXPORT_BUCKET_NAME: cisa-crossfeed-staging-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports + SQS_QUEUE_NAME: crossfeed-staging-worker-queue CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-staging-cloudwatch prod: @@ -77,6 +78,7 @@ prod: EXPORT_BUCKET_NAME: cisa-crossfeed-prod-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports + SQS_QUEUE_NAME: crossfeed-prod-worker-queue CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-prod-cloudwatch dev-vpc: diff --git a/backend/serverless.yml b/backend/serverless.yml index 12cf2f9c9..0524e0af4 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -57,6 +57,15 @@ provider: - s3:PutObject - s3:PutObjectAcl Resource: '*' + - Effect: Allow + Action: + - sts:AssumeRole + Resource: '*' + - Effect: Allow + Action: + - sqs:ReceiveMessage + - sqs:SendMessage + Resource: '*' - Effect: Allow Action: - logs:Describe* diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 64c55d4a1..8d6b54209 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -29,6 +29,17 @@ bastion: makeGlobalAdmin: handler: src/tasks/makeGlobalAdmin.handler +scanExecution: + handler: src/tasks/scanExecution.handler + timeout: 600 # 10 minutes + events: + - sqs: + arn: + Fn::GetAtt: + - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_NAME, ''} + - Arn + batchSize: 5 # Number of messages the lambda can continue to process while a fargate is still running + updateScanTaskStatus: handler: src/tasks/updateScanTaskStatus.handler events: diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts new file mode 100644 index 000000000..ded5194b9 --- /dev/null +++ b/backend/src/tasks/scanExecution.ts @@ -0,0 +1,75 @@ +import { SQSEvent, SQSRecord } from 'aws-lambda'; +import * as AWS from 'aws-sdk'; + +const ecs = new AWS.ECS(); +const sqs = new AWS.SQS(); + +export const invokeFargateTask = async (event: SQSEvent): Promise => { + try { + const sqsRecord: SQSRecord = event.Records[0]; + const commandOptions: string = sqsRecord.body; + + // Get the ARN of the SQS queue from the event + const sqsQueueArn: string | undefined = sqsRecord.eventSourceARN; + + if (!sqsQueueArn) { + throw new Error('SQS Queue ARN not found in event'); + } + + // Describe the SQS queue to get its URL + const sqsQueue = { + QueueUrl: sqsQueueArn // Use the ARN as the QueueUrl + }; + const queueAttributesResponse = await sqs + .getQueueAttributes(sqsQueue) + .promise(); + const sqsQueueUrl = queueAttributesResponse.Attributes?.QueueUrl; + + if (!sqsQueueUrl) { + throw new Error('SQS Queue URL not found'); + } + + const params: AWS.ECS.RunTaskRequest = { + cluster: process.env.FARGATE_CLUSTER_NAME!, + taskDefinition: process.env.FARGATE_TASK_DEFINITION_NAME!, + launchType: 'FARGATE', + networkConfiguration: { + awsvpcConfiguration: { + assignPublicIp: 'ENABLED', + securityGroups: [process.env.FARGATE_SG_ID!], + subnets: [process.env.FARGATE_SUBNET_ID!] 
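+          // Note (assumption): FARGATE_SG_ID and FARGATE_SUBNET_ID, like the cluster and
+          // task definition names above, are read from the Lambda's environment;
+          // assignPublicIp is presumably 'ENABLED' so the task can pull its image when
+          // the subnet has no NAT gateway.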
+ } + }, + platformVersion: '1.4.0', + overrides: { + containerOverrides: [ + { + name: 'main', // from task definition + command: [commandOptions] // Pass the command options as an array + } + ] + } + }; + + const data = await ecs.runTask(params).promise(); + console.log('Fargate task started:', data); + + // Send a message to the SQS queue to trigger processing + const sqsParams: AWS.SQS.SendMessageRequest = { + MessageBody: 'Start processing...', + QueueUrl: sqsQueueUrl + }; + await sqs.sendMessage(sqsParams).promise(); + + return { + statusCode: 200, + body: JSON.stringify('Fargate task started and message sent to SQS queue') + }; + } catch (error) { + console.error('Error starting Fargate task:', error); + return { + statusCode: 500, + body: JSON.stringify('Error starting Fargate task') + }; + } +}; diff --git a/infrastructure/database.tf b/infrastructure/database.tf index 4c25fe6f3..bbdf78339 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -75,6 +75,33 @@ data "aws_ami" "ubuntu" { owners = ["099720109477"] } +# DB Accessor EC2 +resource "aws_instance" "db_accessor" { + count = var.create_db_accessor_instance ? 1 : 0 + ami = data.aws_ami.ubuntu.id + instance_type = var.db_accessor_instance_class + associate_public_ip_address = false + + tags = { + Project = var.project + Stage = var.stage + } + root_block_device { + volume_size = 1000 + } + + vpc_security_group_ids = [aws_security_group.allow_internal.id] + subnet_id = aws_subnet.backend.id + + iam_instance_profile = aws_iam_instance_profile.db_accessor.id + user_data = file("./ssm-agent-install.sh") + + lifecycle { + # prevent_destroy = true + ignore_changes = [ami] + } +} + resource "aws_iam_role" "db_accessor" { name = "crossfeed-db-accessor-${var.stage}" assume_role_policy = < Date: Wed, 4 Oct 2023 10:34:11 -0500 Subject: [PATCH 07/42] Fix medium vulnerabilities flagged by GitHub Actions (#2277) * Bump graphql 16.8.0 => 16.8.1; bump postcss 8.4.28 => 8.4.31. 
--- docs/package-lock.json | 12 ++++++------ frontend/package-lock.json | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 173e2716f..90ef9a2f1 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -12979,9 +12979,9 @@ "dev": true }, "node_modules/graphql": { - "version": "16.8.0", - "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.0.tgz", - "integrity": "sha512-0oKGaR+y3qcS5mCu1vb7KG+a89vjn06C7Ihq/dDl3jA+A8B3TKomvi3CiEcVLJQGalbu8F52LxkOym7U5sSfbg==", + "version": "16.8.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", + "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", "dev": true, "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" @@ -17435,9 +17435,9 @@ "dev": true }, "node_modules/postcss": { - "version": "8.4.28", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.28.tgz", - "integrity": "sha512-Z7V5j0cq8oEKyejIKfpD8b4eBy9cwW2JWPk0+fB1HOAMsfHbnAXLLS+PfVWlzMSLQaWttKDt607I0XHmpE67Vw==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", diff --git a/frontend/package-lock.json b/frontend/package-lock.json index c641345c2..79b30b44b 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -26131,9 +26131,9 @@ } }, "node_modules/postcss": { - "version": "8.4.28", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.28.tgz", - "integrity": "sha512-Z7V5j0cq8oEKyejIKfpD8b4eBy9cwW2JWPk0+fB1HOAMsfHbnAXLLS+PfVWlzMSLQaWttKDt607I0XHmpE67Vw==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", From 3eb5d80e982ec36b35c9289d35b66e5fbcd23f9f Mon Sep 17 00:00:00 2001 From: Matthew <106278637+Matthew-Grayson@users.noreply.github.com> Date: Wed, 4 Oct 2023 10:42:29 -0500 Subject: [PATCH 08/42] 2267 s3 buckets should deny non ssl requests (#2269) Set policies for S3 buckets to deny requests not using SSL; affects the following buckets: - crossfeed.cyber.dhs.gov - staging.crossfeed.cyber.dhs.gov - cisa-crossfeed-prod-exports - cisa-crossfeed-prod-logging - cisa-crossfeed-prod-pe-db-backups - cisa-crossfeed-prod-reports - cisa-crossfeed-staging-cloudtrail - cisa-crossfeed-staging-cloudwatch - cisa-crossfeed-staging-exports - cisa-crossfeed-staging-logging - cisa-crossfeed-staging-pe-db-backups - cisa-crossfeed-staging-reports --- infrastructure/cloudtrail_bucket_policy.tpl | 22 +++++++++-- infrastructure/cloudwatch.tf | 17 +++++++- infrastructure/database.tf | 44 +++++++++++++++++++++ infrastructure/frontend_bucket_policy.tpl | 18 ++++++++- infrastructure/main.tf | 20 ++++++++++ infrastructure/worker.tf | 24 +++++++++++ 6 files changed, 139 insertions(+), 6 deletions(-) diff --git a/infrastructure/cloudtrail_bucket_policy.tpl b/infrastructure/cloudtrail_bucket_policy.tpl index 5bbd3dc62..a277ae497 100644 --- a/infrastructure/cloudtrail_bucket_policy.tpl +++ b/infrastructure/cloudtrail_bucket_policy.tpl @@ -2,7 +2,7 @@ "Version": "2012-10-17", "Statement": [ { - "Sid": "AWSCloudTrailAclCheck20150319", + "Sid": "AWSCloudTrailAclCheck20121017", "Effect": "Allow", 
"Principal": { "Service": "cloudtrail.amazonaws.com" @@ -11,18 +11,34 @@ "Resource": ["arn:aws:s3:::${bucketName}"] }, { - "Sid": "AWSCloudTrailWrite20150319", + "Sid": "AWSCloudTrailWrite20121017", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": ["s3:PutObject"], - "Resource": ["arn:aws:s3:::${bucketName}/AWSLogs/${accountId}/*"], + "Resource": [ + "arn:aws:s3:::${bucketName}/AWSLogs/${accountId}", + "arn:aws:s3:::${bucketName}/AWSLogs/${accountId}/*" + ], "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } + }, + { + "Sid": "RequireSSLRequests", + "Effect": "Deny", + "Resource": [ + "arn:aws:s3:::${bucketName}", + "arn:aws:s3:::${bucketName}/*" + ], + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + } } ] } diff --git a/infrastructure/cloudwatch.tf b/infrastructure/cloudwatch.tf index 57df5b3e8..93d8f7ea5 100644 --- a/infrastructure/cloudwatch.tf +++ b/infrastructure/cloudwatch.tf @@ -23,7 +23,7 @@ resource "aws_s3_bucket_policy" "cloudwatch_bucket" { "Version" : "2012-10-17", "Statement" : [ { - "Sid" : "Allow Cloudwatch to check bucket permissions", + "Sid" : "AWSLogDeliveryGetBucketACL", "Effect" : "Allow", "Principal" : { "Service" : "logs.amazonaws.com" @@ -32,7 +32,7 @@ resource "aws_s3_bucket_policy" "cloudwatch_bucket" { "Resource" : aws_s3_bucket.cloudwatch_bucket.arn }, { - "Sid" : "Allow Cloudwatch to write to bucket", + "Sid" : "AWSLogDeliveryWrite", "Effect" : "Allow", "Principal" : { "Service" : "logs.amazonaws.com" @@ -47,6 +47,19 @@ resource "aws_s3_bucket_policy" "cloudwatch_bucket" { "s3:x-amz-acl" : "bucket-owner-full-control" } } + }, + { + "Sid" : "RequireSSLRequests", + "Effect" : "Deny", + "Resource" : [ + aws_s3_bucket.cloudwatch_bucket.arn, + "${aws_s3_bucket.cloudwatch_bucket.arn}/*" + ], + "Condition" : { + "Bool" : { + "aws:SecureTransport" : "false" + } + } } ] }) diff --git a/infrastructure/database.tf b/infrastructure/database.tf index bbdf78339..a2510fd61 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -268,6 +268,28 @@ resource "aws_s3_bucket" "reports_bucket" { } } +resource "aws_s3_bucket_policy" "reports_bucket" { + bucket = var.reports_bucket_name + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Sid" : "RequireSSLRequests", + "Effect" : "Deny", + "Resource" : [ + aws_s3_bucket.reports_bucket.arn, + "${aws_s3_bucket.reports_bucket.arn}/*" + ], + "Condition" : { + "Bool" : { + "aws:SecureTransport" : "false" + } + } + } + ] + }) +} + resource "aws_s3_bucket_acl" "reports_bucket" { bucket = aws_s3_bucket.reports_bucket.id acl = "private" @@ -304,6 +326,28 @@ resource "aws_s3_bucket" "pe_db_backups_bucket" { } } +resource "aws_s3_bucket_policy" "pe_db_backups_bucket" { + bucket = aws_s3_bucket.pe_db_backups_bucket.id + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Sid" : "RequireSSLRequests", + "Effect" : "Deny", + "Resource" : [ + aws_s3_bucket.pe_db_backups_bucket.arn, + "${aws_s3_bucket.pe_db_backups_bucket.arn}/*" + ], + "Condition" : { + "Bool" : { + "aws:SecureTransport" : "false" + } + } + } + ] + }) +} + resource "aws_s3_bucket_acl" "pe_db_backups_bucket" { bucket = aws_s3_bucket.pe_db_backups_bucket.id acl = "private" diff --git a/infrastructure/frontend_bucket_policy.tpl b/infrastructure/frontend_bucket_policy.tpl index 37405793d..f418a8cf0 100644 --- a/infrastructure/frontend_bucket_policy.tpl +++ b/infrastructure/frontend_bucket_policy.tpl @@ -6,7 +6,23 @@ "Effect":"Allow", 
"Principal": "*", "Action":["s3:GetObject"], - "Resource":["arn:aws:s3:::${bucket_name}/*","arn:aws:s3:::${bucket_name}/*/"] + "Resource":[ + "arn:aws:s3:::${bucket_name}/*", + "arn:aws:s3:::${bucket_name}/*/" + ] + }, + { + "Sid": "RequireSSLRequests", + "Effect": "Deny", + "Resource": [ + "arn:aws:s3:::${bucket_name}", + "arn:aws:s3:::${bucket_name}/*" + ], + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + } } ] } diff --git a/infrastructure/main.tf b/infrastructure/main.tf index d01a621d1..3eb46ccb3 100644 --- a/infrastructure/main.tf +++ b/infrastructure/main.tf @@ -32,6 +32,26 @@ resource "aws_s3_bucket" "logging_bucket" { } } +resource "aws_s3_bucket_policy" "logging_bucket" { + bucket = aws_s3_bucket.logging_bucket.id + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [{ + "Sid" : "RequireSSLRequests", + "Effect" : "Deny", + "Resource" : [ + aws_s3_bucket.logging_bucket.arn, + "${aws_s3_bucket.logging_bucket.arn}/*" + ], + "Condition" : { + "Bool" : { + "aws:SecureTransport" : "false" + } + } + }] + }) +} + resource "aws_s3_bucket_acl" "logging_bucket" { bucket = aws_s3_bucket.logging_bucket.id acl = "private" diff --git a/infrastructure/worker.tf b/infrastructure/worker.tf index 0ca8fd842..f4943c967 100644 --- a/infrastructure/worker.tf +++ b/infrastructure/worker.tf @@ -351,6 +351,30 @@ resource "aws_s3_bucket" "export_bucket" { } } +resource "aws_s3_bucket_policy" "export_bucket" { + bucket = var.export_bucket_name + policy = jsonencode({ + "Version" = "2012-10-17" + "Statement" = [ + { + Sid = "RequireSSLRequests" + Effect = "Deny" + Principal = "*" + Action = "s3:*" + Resource = [ + aws_s3_bucket.export_bucket.arn, + "${aws_s3_bucket.export_bucket.arn}/*" + ] + Condition = { + Bool = { + "aws:SecureTransport" : false + } + } + } + ] + }) +} + resource "aws_s3_bucket_acl" "export_bucket" { bucket = aws_s3_bucket.export_bucket.id acl = "private" From b9f86c196a80e75ba68dbd00f97b7631eac5a33f Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 12:28:06 -0400 Subject: [PATCH 09/42] Edit accessor permission to be inline instead of a new policy --- backend/src/tasks/functions.yml | 4 ++-- infrastructure/database.tf | 12 +++--------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 8d6b54209..c12b3cfbe 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -31,14 +31,14 @@ makeGlobalAdmin: scanExecution: handler: src/tasks/scanExecution.handler - timeout: 600 # 10 minutes + timeout: 300 # 5 minutes events: - sqs: arn: Fn::GetAtt: - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_NAME, ''} - Arn - batchSize: 5 # Number of messages the lambda can continue to process while a fargate is still running + batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: handler: src/tasks/updateScanTaskStatus.handler diff --git a/infrastructure/database.tf b/infrastructure/database.tf index bbdf78339..615f930ba 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -164,9 +164,9 @@ resource "aws_iam_role_policy" "db_accessor_s3_policy" { EOF } -resource "aws_iam_policy" "sqs_send_message_policy" { - name = "ec2-send-sqs-message-${var.stage}" - description = "IAM policy to allow sending messages to SQS queue" +resource "aws_iam_role_policy" "sqs_send_message_policy" { + name_prefix = "ec2-send-sqs-message-${var.stage}" + role = aws_iam_role.db_accessor.id 
policy = jsonencode({ Version = "2012-10-17", Statement = [ @@ -186,12 +186,6 @@ resource "aws_iam_policy" "sqs_send_message_policy" { }) } -resource "aws_iam_policy_attachment" "db_accessor_3" { - name = "crossfeed-db-accessor-${var.stage}" - roles = [aws_iam_role.db_accessor.name] - policy_arn = aws_iam_policy.sqs_send_message_policy.arn -} - # Lambda and Fargate SSM Parameters resource "aws_ssm_parameter" "lambda_sg_id" { name = var.ssm_lambda_sg From 40a95076272cdd57175b313cf0092510af8d417f Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 12:31:28 -0400 Subject: [PATCH 10/42] Format terraform --- infrastructure/database.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/infrastructure/database.tf b/infrastructure/database.tf index a50c24462..0090c6907 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -165,8 +165,8 @@ EOF } resource "aws_iam_role_policy" "sqs_send_message_policy" { - name_prefix = "ec2-send-sqs-message-${var.stage}" - role = aws_iam_role.db_accessor.id + name_prefix = "ec2-send-sqs-message-${var.stage}" + role = aws_iam_role.db_accessor.id policy = jsonencode({ Version = "2012-10-17", Statement = [ From 1cc8f38f3fd3e86c4e7f830834eb115ca37cd8db Mon Sep 17 00:00:00 2001 From: aloftus23 <79927030+aloftus23@users.noreply.github.com> Date: Wed, 4 Oct 2023 12:40:10 -0400 Subject: [PATCH 11/42] SQS POC: Add queue and lambda function to trigger scans in fargate *Fix Policy (#2279) * Edit accessor permission to be inline instead of a new policy * Format terraform --- backend/src/tasks/functions.yml | 4 ++-- infrastructure/database.tf | 12 +++--------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 8d6b54209..c12b3cfbe 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -31,14 +31,14 @@ makeGlobalAdmin: scanExecution: handler: src/tasks/scanExecution.handler - timeout: 600 # 10 minutes + timeout: 300 # 5 minutes events: - sqs: arn: Fn::GetAtt: - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_NAME, ''} - Arn - batchSize: 5 # Number of messages the lambda can continue to process while a fargate is still running + batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: handler: src/tasks/updateScanTaskStatus.handler diff --git a/infrastructure/database.tf b/infrastructure/database.tf index a2510fd61..0090c6907 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -164,9 +164,9 @@ resource "aws_iam_role_policy" "db_accessor_s3_policy" { EOF } -resource "aws_iam_policy" "sqs_send_message_policy" { - name = "ec2-send-sqs-message-${var.stage}" - description = "IAM policy to allow sending messages to SQS queue" +resource "aws_iam_role_policy" "sqs_send_message_policy" { + name_prefix = "ec2-send-sqs-message-${var.stage}" + role = aws_iam_role.db_accessor.id policy = jsonencode({ Version = "2012-10-17", Statement = [ @@ -186,12 +186,6 @@ resource "aws_iam_policy" "sqs_send_message_policy" { }) } -resource "aws_iam_policy_attachment" "db_accessor_3" { - name = "crossfeed-db-accessor-${var.stage}" - roles = [aws_iam_role.db_accessor.name] - policy_arn = aws_iam_policy.sqs_send_message_policy.arn -} - # Lambda and Fargate SSM Parameters resource "aws_ssm_parameter" "lambda_sg_id" { name = var.ssm_lambda_sg From f5fefefe8e499594f10c724ca56df128f1416899 Mon Sep 17 00:00:00 2001 From: Matthew 
<106278637+Matthew-Grayson@users.noreply.github.com> Date: Wed, 4 Oct 2023 12:08:53 -0500 Subject: [PATCH 12/42] 2267 s3 buckets should deny non ssl requests (#2280) * Add Principal to RequireSSL policies. --- infrastructure/cloudtrail_bucket_policy.tpl | 1 + infrastructure/cloudwatch.tf | 1 + infrastructure/database.tf | 2 ++ infrastructure/frontend_bucket_policy.tpl | 1 + infrastructure/main.tf | 1 + infrastructure/worker.tf | 18 +++++++++--------- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/infrastructure/cloudtrail_bucket_policy.tpl b/infrastructure/cloudtrail_bucket_policy.tpl index a277ae497..15dbc0da6 100644 --- a/infrastructure/cloudtrail_bucket_policy.tpl +++ b/infrastructure/cloudtrail_bucket_policy.tpl @@ -30,6 +30,7 @@ { "Sid": "RequireSSLRequests", "Effect": "Deny", + "Principal": "*", "Resource": [ "arn:aws:s3:::${bucketName}", "arn:aws:s3:::${bucketName}/*" diff --git a/infrastructure/cloudwatch.tf b/infrastructure/cloudwatch.tf index 93d8f7ea5..667236bbe 100644 --- a/infrastructure/cloudwatch.tf +++ b/infrastructure/cloudwatch.tf @@ -51,6 +51,7 @@ resource "aws_s3_bucket_policy" "cloudwatch_bucket" { { "Sid" : "RequireSSLRequests", "Effect" : "Deny", + "Principal" : "*", "Resource" : [ aws_s3_bucket.cloudwatch_bucket.arn, "${aws_s3_bucket.cloudwatch_bucket.arn}/*" diff --git a/infrastructure/database.tf b/infrastructure/database.tf index 0090c6907..e21640eec 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -270,6 +270,7 @@ resource "aws_s3_bucket_policy" "reports_bucket" { { "Sid" : "RequireSSLRequests", "Effect" : "Deny", + "Principal" : "*", "Resource" : [ aws_s3_bucket.reports_bucket.arn, "${aws_s3_bucket.reports_bucket.arn}/*" @@ -328,6 +329,7 @@ resource "aws_s3_bucket_policy" "pe_db_backups_bucket" { { "Sid" : "RequireSSLRequests", "Effect" : "Deny", + "Principal" : "*", "Resource" : [ aws_s3_bucket.pe_db_backups_bucket.arn, "${aws_s3_bucket.pe_db_backups_bucket.arn}/*" diff --git a/infrastructure/frontend_bucket_policy.tpl b/infrastructure/frontend_bucket_policy.tpl index f418a8cf0..416f198ec 100644 --- a/infrastructure/frontend_bucket_policy.tpl +++ b/infrastructure/frontend_bucket_policy.tpl @@ -14,6 +14,7 @@ { "Sid": "RequireSSLRequests", "Effect": "Deny", + "Principal": "*", "Resource": [ "arn:aws:s3:::${bucket_name}", "arn:aws:s3:::${bucket_name}/*" diff --git a/infrastructure/main.tf b/infrastructure/main.tf index 3eb46ccb3..bfccb4fcb 100644 --- a/infrastructure/main.tf +++ b/infrastructure/main.tf @@ -39,6 +39,7 @@ resource "aws_s3_bucket_policy" "logging_bucket" { "Statement" : [{ "Sid" : "RequireSSLRequests", "Effect" : "Deny", + "Principal" : "*", "Resource" : [ aws_s3_bucket.logging_bucket.arn, "${aws_s3_bucket.logging_bucket.arn}/*" diff --git a/infrastructure/worker.tf b/infrastructure/worker.tf index f4943c967..7ed0d77e8 100644 --- a/infrastructure/worker.tf +++ b/infrastructure/worker.tf @@ -354,19 +354,19 @@ resource "aws_s3_bucket" "export_bucket" { resource "aws_s3_bucket_policy" "export_bucket" { bucket = var.export_bucket_name policy = jsonencode({ - "Version" = "2012-10-17" - "Statement" = [ + "Version" : "2012-10-17" + "Statement" : [ { - Sid = "RequireSSLRequests" - Effect = "Deny" - Principal = "*" - Action = "s3:*" - Resource = [ + Sid : "RequireSSLRequests" + Effect : "Deny" + Principal : "*" + Action : "s3:*" + Resource : [ aws_s3_bucket.export_bucket.arn, "${aws_s3_bucket.export_bucket.arn}/*" ] - Condition = { - Bool = { + Condition : { + Bool : { "aws:SecureTransport" : false } } From 
fc737a51badc3b260fc3c7f282ffdfc9a6fc21dc Mon Sep 17 00:00:00 2001 From: Matthew <106278637+Matthew-Grayson@users.noreply.github.com> Date: Wed, 4 Oct 2023 12:31:04 -0500 Subject: [PATCH 13/42] 2267 s3 buckets should deny non ssl requests (#2281) * Add 'Action': 's3:*' to RequireSSL permisions. --- infrastructure/cloudtrail_bucket_policy.tpl | 1 + infrastructure/cloudwatch.tf | 1 + infrastructure/database.tf | 2 ++ infrastructure/frontend_bucket_policy.tpl | 1 + infrastructure/main.tf | 1 + infrastructure/worker.tf | 14 +++++++------- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/infrastructure/cloudtrail_bucket_policy.tpl b/infrastructure/cloudtrail_bucket_policy.tpl index 15dbc0da6..5067a5c4a 100644 --- a/infrastructure/cloudtrail_bucket_policy.tpl +++ b/infrastructure/cloudtrail_bucket_policy.tpl @@ -29,6 +29,7 @@ }, { "Sid": "RequireSSLRequests", + "Action": "s3:*", "Effect": "Deny", "Principal": "*", "Resource": [ diff --git a/infrastructure/cloudwatch.tf b/infrastructure/cloudwatch.tf index 667236bbe..848f97e93 100644 --- a/infrastructure/cloudwatch.tf +++ b/infrastructure/cloudwatch.tf @@ -50,6 +50,7 @@ resource "aws_s3_bucket_policy" "cloudwatch_bucket" { }, { "Sid" : "RequireSSLRequests", + "Action" : "s3:*", "Effect" : "Deny", "Principal" : "*", "Resource" : [ diff --git a/infrastructure/database.tf b/infrastructure/database.tf index e21640eec..125fc33be 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -269,6 +269,7 @@ resource "aws_s3_bucket_policy" "reports_bucket" { "Statement" : [ { "Sid" : "RequireSSLRequests", + "Action" : "s3:*", "Effect" : "Deny", "Principal" : "*", "Resource" : [ @@ -328,6 +329,7 @@ resource "aws_s3_bucket_policy" "pe_db_backups_bucket" { "Statement" : [ { "Sid" : "RequireSSLRequests", + "Action" : "s3:*", "Effect" : "Deny", "Principal" : "*", "Resource" : [ diff --git a/infrastructure/frontend_bucket_policy.tpl b/infrastructure/frontend_bucket_policy.tpl index 416f198ec..8beacee63 100644 --- a/infrastructure/frontend_bucket_policy.tpl +++ b/infrastructure/frontend_bucket_policy.tpl @@ -13,6 +13,7 @@ }, { "Sid": "RequireSSLRequests", + "Action": "s3:*", "Effect": "Deny", "Principal": "*", "Resource": [ diff --git a/infrastructure/main.tf b/infrastructure/main.tf index bfccb4fcb..7cacdb573 100644 --- a/infrastructure/main.tf +++ b/infrastructure/main.tf @@ -38,6 +38,7 @@ resource "aws_s3_bucket_policy" "logging_bucket" { "Version" : "2012-10-17", "Statement" : [{ "Sid" : "RequireSSLRequests", + "Action" : "s3:*", "Effect" : "Deny", "Principal" : "*", "Resource" : [ diff --git a/infrastructure/worker.tf b/infrastructure/worker.tf index 7ed0d77e8..e30ae4722 100644 --- a/infrastructure/worker.tf +++ b/infrastructure/worker.tf @@ -357,16 +357,16 @@ resource "aws_s3_bucket_policy" "export_bucket" { "Version" : "2012-10-17" "Statement" : [ { - Sid : "RequireSSLRequests" - Effect : "Deny" - Principal : "*" - Action : "s3:*" - Resource : [ + "Sid" : "RequireSSLRequests" + "Action" : "s3:*", + "Effect" : "Deny" + "Principal" : "*" + "Resource" : [ aws_s3_bucket.export_bucket.arn, "${aws_s3_bucket.export_bucket.arn}/*" ] - Condition : { - Bool : { + "Condition" : { + "Bool" : { "aws:SecureTransport" : false } } From c1794f4c813a74aa9aa4a76dd3e2d56e05458afe Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 13:45:47 -0400 Subject: [PATCH 14/42] Fix SQS in serverless deploy --- backend/env.yml | 4 ++-- backend/src/tasks/functions.yml | 3 +-- infrastructure/prod.tfvars | 4 +--- infrastructure/sqs.tf | 7 +++++++ 
infrastructure/stage.tfvars | 4 +--- infrastructure/vars.tf | 24 ++++++------------------ 6 files changed, 18 insertions(+), 28 deletions(-) diff --git a/backend/env.yml b/backend/env.yml index ac16d4ddc..9f140e4e5 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -41,7 +41,7 @@ staging: EXPORT_BUCKET_NAME: cisa-crossfeed-staging-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports - SQS_QUEUE_NAME: crossfeed-staging-worker-queue + SQS_QUEUE_ARN: ${ssm:/crossfeed/staging/SQS_QUEUE_ARN} CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-staging-cloudwatch prod: @@ -78,7 +78,7 @@ prod: EXPORT_BUCKET_NAME: cisa-crossfeed-prod-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports - SQS_QUEUE_NAME: crossfeed-prod-worker-queue + SQS_QUEUE_ARN: ${ssm:/crossfeed/prod/SQS_QUEUE_ARN} CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-prod-cloudwatch dev-vpc: diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index c12b3cfbe..8a219641f 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -36,8 +36,7 @@ scanExecution: - sqs: arn: Fn::GetAtt: - - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_NAME, ''} - - Arn + - ${self:custom.sqsEnvVar}.${self:provider.stage}.SQS_QUEUE_ARN batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: diff --git a/infrastructure/prod.tfvars b/infrastructure/prod.tfvars index 3db2dd255..ba9997689 100644 --- a/infrastructure/prod.tfvars +++ b/infrastructure/prod.tfvars @@ -42,12 +42,9 @@ worker_ecs_task_definition_family = "crossfeed-prod-worker" worker_ecs_log_group_name = "crossfeed-prod-worker" worker_ecs_role_name = "crossfeed-prod-worker" logging_bucket_name = "cisa-crossfeed-prod-logging" -cloudtrail_name = "crossfeed-prod-all-events" cloudtrail_bucket_name = "cisa-crossfeed-prod-cloudtrail" cloudtrail_role_name = "cisa-crossfeed-prod-cloudtrail" cloudtrail_log_group_name = "cisa-crossfeed-prod-cloudtrail" -cloudwatch_bucket_name = "cisa-crossfeed-prod-cloudwatch" -cloudwatch_log_group_name = "crossfeed-prod-cloudwatch-bucket" export_bucket_name = "cisa-crossfeed-prod-exports" reports_bucket_name = "cisa-crossfeed-prod-reports" pe_db_backups_bucket_name = "cisa-crossfeed-prod-pe-db-backups" @@ -71,3 +68,4 @@ db_accessor_instance_class = "t3.2xlarge" create_elk_instance = false elk_instance_class = "t3.2xlarge" sqs_queue_name = "crossfeed-prod-worker-queue" +ssm_sqs_queue_arn = "/crossfeed/prod/SQS_QUEUE_ARN" diff --git a/infrastructure/sqs.tf b/infrastructure/sqs.tf index 2a62f8449..a7e182501 100644 --- a/infrastructure/sqs.tf +++ b/infrastructure/sqs.tf @@ -11,4 +11,11 @@ resource "aws_sqs_queue" "terraform_queue" { Project = var.project Stage = var.stage } +} + +resource "aws_ssm_parameter" "sqs_queue_arn" { + name = var.ssm_sqs_queue_arn + description = "ARN of the SQS queue" + type = "String" + value = aws_sqs_queue.terraform_queue.arn } \ No newline at end of file diff --git a/infrastructure/stage.tfvars b/infrastructure/stage.tfvars index 6eac78731..7c6cb4b24 100644 --- a/infrastructure/stage.tfvars +++ b/infrastructure/stage.tfvars @@ -42,12 +42,9 @@ worker_ecs_task_definition_family = "crossfeed-staging-worker" worker_ecs_log_group_name = "crossfeed-staging-worker" worker_ecs_role_name = "crossfeed-staging-worker" logging_bucket_name = "cisa-crossfeed-staging-logging" -cloudtrail_name = "crossfeed-staging-all-events" cloudtrail_bucket_name = 
"cisa-crossfeed-staging-cloudtrail" cloudtrail_role_name = "cisa-crossfeed-staging-cloudtrail" cloudtrail_log_group_name = "cisa-crossfeed-staging-cloudtrail" -cloudwatch_bucket_name = "cisa-crossfeed-staging-cloudwatch" -cloudwatch_log_group_name = "crossfeed-staging-cloudwatch-bucket" export_bucket_name = "cisa-crossfeed-staging-exports" reports_bucket_name = "cisa-crossfeed-staging-reports" pe_db_backups_bucket_name = "cisa-crossfeed-staging-pe-db-backups" @@ -71,3 +68,4 @@ db_accessor_instance_class = "t3.2xlarge" create_elk_instance = true elk_instance_class = "t3.2xlarge" sqs_queue_name = "crossfeed-staging-worker-queue" +ssm_sqs_queue_arn = "/crossfeed/staging/SQS_QUEUE_ARN" diff --git a/infrastructure/vars.tf b/infrastructure/vars.tf index b80769e84..00b23d750 100644 --- a/infrastructure/vars.tf +++ b/infrastructure/vars.tf @@ -117,6 +117,12 @@ variable "ssm_worker_subnet" { default = "/crossfeed/staging/WORKER_SUBNET_ID" } +variable "ssm_sqs_queue_arn" { + description = "ssm_sqs_queue_arn" + type = string + default = "/crossfeed/staging/SQS_QUEUE_ARN" +} + variable "ssm_worker_arn" { description = "ssm_worker_arn" type = string @@ -262,12 +268,6 @@ variable "logging_bucket_name" { default = "cisa-crossfeed-staging-logging" } -variable "cloudtrail_name" { - description = "cloudtrail_name" - type = string - default = "crossfeed-staging-all-events" -} - variable "cloudtrail_bucket_name" { description = "cloudtrail_bucket_name" type = string @@ -286,18 +286,6 @@ variable "cloudtrail_log_group_name" { default = "crossfeed-staging-cloudtrail-logs" } -variable "cloudwatch_bucket_name" { - description = "cloudwatch_bucket_name" - type = string - default = "cisa-crossfeed-staging-cloudwatch" -} - -variable "cloudwatch_log_group_name" { - description = "cloudwatch_log_group_name" - type = string - default = "crossfeed-staging-cloudwatch-bucket" -} - variable "export_bucket_name" { description = "export_bucket_name" type = string From fcebb6f155fef4a3d2572fc4fb1c7bc219e3465d Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 13:48:08 -0400 Subject: [PATCH 15/42] Run terraform fmt --- infrastructure/sqs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/sqs.tf b/infrastructure/sqs.tf index a7e182501..b023c4e88 100644 --- a/infrastructure/sqs.tf +++ b/infrastructure/sqs.tf @@ -17,5 +17,5 @@ resource "aws_ssm_parameter" "sqs_queue_arn" { name = var.ssm_sqs_queue_arn description = "ARN of the SQS queue" type = "String" - value = aws_sqs_queue.terraform_queue.arn + value = aws_sqs_queue.terraform_queue.arn } \ No newline at end of file From 0735f20369f9c0f8637f3067a28332addb8f196d Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 13:53:01 -0400 Subject: [PATCH 16/42] Fix merge conflict error --- infrastructure/prod.tfvars | 3 +++ infrastructure/stage.tfvars | 3 +++ infrastructure/vars.tf | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+) diff --git a/infrastructure/prod.tfvars b/infrastructure/prod.tfvars index ba9997689..54de413ad 100644 --- a/infrastructure/prod.tfvars +++ b/infrastructure/prod.tfvars @@ -42,9 +42,12 @@ worker_ecs_task_definition_family = "crossfeed-prod-worker" worker_ecs_log_group_name = "crossfeed-prod-worker" worker_ecs_role_name = "crossfeed-prod-worker" logging_bucket_name = "cisa-crossfeed-prod-logging" +cloudtrail_name = "crossfeed-prod-all-events" cloudtrail_bucket_name = "cisa-crossfeed-prod-cloudtrail" cloudtrail_role_name = "cisa-crossfeed-prod-cloudtrail" cloudtrail_log_group_name = 
"cisa-crossfeed-prod-cloudtrail" +cloudwatch_bucket_name = "cisa-crossfeed-prod-cloudwatch" +cloudwatch_log_group_name = "crossfeed-prod-cloudwatch-bucket" export_bucket_name = "cisa-crossfeed-prod-exports" reports_bucket_name = "cisa-crossfeed-prod-reports" pe_db_backups_bucket_name = "cisa-crossfeed-prod-pe-db-backups" diff --git a/infrastructure/stage.tfvars b/infrastructure/stage.tfvars index 7c6cb4b24..52d17d558 100644 --- a/infrastructure/stage.tfvars +++ b/infrastructure/stage.tfvars @@ -42,9 +42,12 @@ worker_ecs_task_definition_family = "crossfeed-staging-worker" worker_ecs_log_group_name = "crossfeed-staging-worker" worker_ecs_role_name = "crossfeed-staging-worker" logging_bucket_name = "cisa-crossfeed-staging-logging" +cloudtrail_name = "crossfeed-staging-all-events" cloudtrail_bucket_name = "cisa-crossfeed-staging-cloudtrail" cloudtrail_role_name = "cisa-crossfeed-staging-cloudtrail" cloudtrail_log_group_name = "cisa-crossfeed-staging-cloudtrail" +cloudwatch_bucket_name = "cisa-crossfeed-staging-cloudwatch" +cloudwatch_log_group_name = "crossfeed-staging-cloudwatch-bucket" export_bucket_name = "cisa-crossfeed-staging-exports" reports_bucket_name = "cisa-crossfeed-staging-reports" pe_db_backups_bucket_name = "cisa-crossfeed-staging-pe-db-backups" diff --git a/infrastructure/vars.tf b/infrastructure/vars.tf index 00b23d750..3e4ea5005 100644 --- a/infrastructure/vars.tf +++ b/infrastructure/vars.tf @@ -268,6 +268,12 @@ variable "logging_bucket_name" { default = "cisa-crossfeed-staging-logging" } +variable "cloudtrail_name" { + description = "cloudtrail_name" + type = string + default = "crossfeed-staging-all-events" +} + variable "cloudtrail_bucket_name" { description = "cloudtrail_bucket_name" type = string @@ -286,6 +292,18 @@ variable "cloudtrail_log_group_name" { default = "crossfeed-staging-cloudtrail-logs" } +variable "cloudwatch_bucket_name" { + description = "cloudwatch_bucket_name" + type = string + default = "cisa-crossfeed-staging-cloudwatch" +} + +variable "cloudwatch_log_group_name" { + description = "cloudwatch_log_group_name" + type = string + default = "crossfeed-staging-cloudwatch-bucket" +} + variable "export_bucket_name" { description = "export_bucket_name" type = string From b4169d6a1cbf81d92ff1e662317f212aace2f682 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 13:59:22 -0400 Subject: [PATCH 17/42] simplify lambda definition --- backend/src/tasks/functions.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 8a219641f..afde85895 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -34,9 +34,7 @@ scanExecution: timeout: 300 # 5 minutes events: - sqs: - arn: - Fn::GetAtt: - - ${self:custom.sqsEnvVar}.${self:provider.stage}.SQS_QUEUE_ARN + arn: ${self:custom.sqsEnvVar}.${self:provider.stage}.SQS_QUEUE_ARN batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: From 380522ff318766077e717735fd13f12a171eab0e Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 14:10:41 -0400 Subject: [PATCH 18/42] fix arn call --- backend/src/tasks/functions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index afde85895..9cd391d7b 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -34,7 +34,7 @@ scanExecution: timeout: 300 # 5 minutes events: - 
sqs: - arn: ${self:custom.sqsEnvVar}.${self:provider.stage}.SQS_QUEUE_ARN + arn: ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: From c8e661911a277bb0d1f998370986c1f4c0395b61 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 14:18:27 -0400 Subject: [PATCH 19/42] fix typo --- backend/src/tasks/functions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 9cd391d7b..456d6872f 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -34,7 +34,7 @@ scanExecution: timeout: 300 # 5 minutes events: - sqs: - arn: ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN + arn: ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN, ''} batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: From f2404bf4ac3640e16e186b5b41ab57edaed06549 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 14:32:18 -0400 Subject: [PATCH 20/42] fix typo --- backend/src/tasks/functions.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 456d6872f..2208ea57b 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -34,7 +34,8 @@ scanExecution: timeout: 300 # 5 minutes events: - sqs: - arn: ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN, ''} + arn: + - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN, ''} batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: From 6df8f35bbccb90d2a39187bfc484322af258fb8f Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 15:03:39 -0400 Subject: [PATCH 21/42] fix sqs arn definition --- backend/src/tasks/functions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 2208ea57b..538241ad4 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -35,7 +35,7 @@ scanExecution: events: - sqs: arn: - - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN, ''} + Fn::ImportValue: ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN, ''} batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: From 3f3316afbe27b0f44dedf588fa8ae4319f4291ea Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 15:18:02 -0400 Subject: [PATCH 22/42] Create the queue in Serverless instead of terraform --- backend/env.yml | 2 -- backend/serverless.yml | 7 +++++++ backend/src/tasks/functions.yml | 4 +++- infrastructure/prod.tfvars | 2 -- infrastructure/sqs.tf | 21 --------------------- infrastructure/stage.tfvars | 2 -- infrastructure/vars.tf | 13 ------------- 7 files changed, 10 insertions(+), 41 deletions(-) delete mode 100644 infrastructure/sqs.tf diff --git a/backend/env.yml b/backend/env.yml index 9f140e4e5..4a922725a 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -41,7 +41,6 @@ staging: EXPORT_BUCKET_NAME: cisa-crossfeed-staging-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports - SQS_QUEUE_ARN: ${ssm:/crossfeed/staging/SQS_QUEUE_ARN} CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-staging-cloudwatch prod: @@ -78,7 +77,6 @@ prod: 
EXPORT_BUCKET_NAME: cisa-crossfeed-prod-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports - SQS_QUEUE_ARN: ${ssm:/crossfeed/prod/SQS_QUEUE_ARN} CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-prod-cloudwatch dev-vpc: diff --git a/backend/serverless.yml b/backend/serverless.yml index 0524e0af4..c8e4f94d3 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -79,6 +79,13 @@ provider: - logs:StopLiveTail Resource: '*' +resources: + Resources: + MySQSQueue: + Type: AWS::SQS::Queue + Properties: + QueueName: ${self:provider.stage}-worker-queue + functions: - ${file(./src/tasks/functions.yml)} - ${file(./src/api/functions.yml)} diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 538241ad4..cc373c320 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -35,7 +35,9 @@ scanExecution: events: - sqs: arn: - Fn::ImportValue: ${file(env.yml):${self:provider.stage}.SQS_QUEUE_ARN, ''} + Fn::GetAtt: + - ${self:provider.stage}-worker-queue + - Arn batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running updateScanTaskStatus: diff --git a/infrastructure/prod.tfvars b/infrastructure/prod.tfvars index 54de413ad..db987357d 100644 --- a/infrastructure/prod.tfvars +++ b/infrastructure/prod.tfvars @@ -70,5 +70,3 @@ create_db_accessor_instance = true db_accessor_instance_class = "t3.2xlarge" create_elk_instance = false elk_instance_class = "t3.2xlarge" -sqs_queue_name = "crossfeed-prod-worker-queue" -ssm_sqs_queue_arn = "/crossfeed/prod/SQS_QUEUE_ARN" diff --git a/infrastructure/sqs.tf b/infrastructure/sqs.tf deleted file mode 100644 index b023c4e88..000000000 --- a/infrastructure/sqs.tf +++ /dev/null @@ -1,21 +0,0 @@ - -# SQS Queue -resource "aws_sqs_queue" "terraform_queue" { - name = var.sqs_queue_name - delay_seconds = 90 - max_message_size = 262144 - message_retention_seconds = 345600 # 4 days - receive_wait_time_seconds = 10 - - tags = { - Project = var.project - Stage = var.stage - } -} - -resource "aws_ssm_parameter" "sqs_queue_arn" { - name = var.ssm_sqs_queue_arn - description = "ARN of the SQS queue" - type = "String" - value = aws_sqs_queue.terraform_queue.arn -} \ No newline at end of file diff --git a/infrastructure/stage.tfvars b/infrastructure/stage.tfvars index 52d17d558..a0a05cef0 100644 --- a/infrastructure/stage.tfvars +++ b/infrastructure/stage.tfvars @@ -70,5 +70,3 @@ create_db_accessor_instance = true db_accessor_instance_class = "t3.2xlarge" create_elk_instance = true elk_instance_class = "t3.2xlarge" -sqs_queue_name = "crossfeed-staging-worker-queue" -ssm_sqs_queue_arn = "/crossfeed/staging/SQS_QUEUE_ARN" diff --git a/infrastructure/vars.tf b/infrastructure/vars.tf index 3e4ea5005..c3d05237b 100644 --- a/infrastructure/vars.tf +++ b/infrastructure/vars.tf @@ -117,12 +117,6 @@ variable "ssm_worker_subnet" { default = "/crossfeed/staging/WORKER_SUBNET_ID" } -variable "ssm_sqs_queue_arn" { - description = "ssm_sqs_queue_arn" - type = string - default = "/crossfeed/staging/SQS_QUEUE_ARN" -} - variable "ssm_worker_arn" { description = "ssm_worker_arn" type = string @@ -435,10 +429,3 @@ variable "create_elk_instance" { type = bool default = false } - -variable "sqs_queue_name" { - description = "sqs_queue_name" - type = string - default = "crossfeed-staging-worker-queue" -} - From a06a90623b6a1f4342f8f1f81800937c474e6a01 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 15:20:12 -0400 Subject: [PATCH 23/42] fix 
sqs policy for db accessor ec2 --- infrastructure/database.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/database.tf b/infrastructure/database.tf index 125fc33be..fd516fb92 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -180,7 +180,7 @@ resource "aws_iam_role_policy" "sqs_send_message_policy" { "sqs:GetQueueUrl" ], Effect = "Allow", - Resource = aws_sqs_queue.terraform_queue.arn + Resource = "*" } ] }) From 3d1733bdc6813da98dfb4467f94e2cf117c48d4d Mon Sep 17 00:00:00 2001 From: aloftus23 <79927030+aloftus23@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:35:21 -0400 Subject: [PATCH 24/42] Move SQS to Serverless to fix lambda mapping and deploy (#2282) * Create the queue in Serverless instead of terraform * fix sqs policy for db accessor ec2 --- backend/env.yml | 2 -- backend/serverless.yml | 7 +++++++ backend/src/tasks/functions.yml | 2 +- infrastructure/database.tf | 2 +- infrastructure/prod.tfvars | 1 - infrastructure/sqs.tf | 14 -------------- infrastructure/stage.tfvars | 1 - infrastructure/vars.tf | 7 ------- 8 files changed, 9 insertions(+), 27 deletions(-) delete mode 100644 infrastructure/sqs.tf diff --git a/backend/env.yml b/backend/env.yml index ac16d4ddc..4a922725a 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -41,7 +41,6 @@ staging: EXPORT_BUCKET_NAME: cisa-crossfeed-staging-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports - SQS_QUEUE_NAME: crossfeed-staging-worker-queue CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-staging-cloudwatch prod: @@ -78,7 +77,6 @@ prod: EXPORT_BUCKET_NAME: cisa-crossfeed-prod-exports PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports - SQS_QUEUE_NAME: crossfeed-prod-worker-queue CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-prod-cloudwatch dev-vpc: diff --git a/backend/serverless.yml b/backend/serverless.yml index 0524e0af4..c8e4f94d3 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -79,6 +79,13 @@ provider: - logs:StopLiveTail Resource: '*' +resources: + Resources: + MySQSQueue: + Type: AWS::SQS::Queue + Properties: + QueueName: ${self:provider.stage}-worker-queue + functions: - ${file(./src/tasks/functions.yml)} - ${file(./src/api/functions.yml)} diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index c12b3cfbe..cc373c320 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -36,7 +36,7 @@ scanExecution: - sqs: arn: Fn::GetAtt: - - ${file(env.yml):${self:provider.stage}.SQS_QUEUE_NAME, ''} + - ${self:provider.stage}-worker-queue - Arn batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running diff --git a/infrastructure/database.tf b/infrastructure/database.tf index 125fc33be..fd516fb92 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -180,7 +180,7 @@ resource "aws_iam_role_policy" "sqs_send_message_policy" { "sqs:GetQueueUrl" ], Effect = "Allow", - Resource = aws_sqs_queue.terraform_queue.arn + Resource = "*" } ] }) diff --git a/infrastructure/prod.tfvars b/infrastructure/prod.tfvars index 3db2dd255..db987357d 100644 --- a/infrastructure/prod.tfvars +++ b/infrastructure/prod.tfvars @@ -70,4 +70,3 @@ create_db_accessor_instance = true db_accessor_instance_class = "t3.2xlarge" create_elk_instance = false elk_instance_class = "t3.2xlarge" -sqs_queue_name = "crossfeed-prod-worker-queue" diff --git a/infrastructure/sqs.tf 
b/infrastructure/sqs.tf deleted file mode 100644 index 2a62f8449..000000000 --- a/infrastructure/sqs.tf +++ /dev/null @@ -1,14 +0,0 @@ - -# SQS Queue -resource "aws_sqs_queue" "terraform_queue" { - name = var.sqs_queue_name - delay_seconds = 90 - max_message_size = 262144 - message_retention_seconds = 345600 # 4 days - receive_wait_time_seconds = 10 - - tags = { - Project = var.project - Stage = var.stage - } -} \ No newline at end of file diff --git a/infrastructure/stage.tfvars b/infrastructure/stage.tfvars index 6eac78731..a0a05cef0 100644 --- a/infrastructure/stage.tfvars +++ b/infrastructure/stage.tfvars @@ -70,4 +70,3 @@ create_db_accessor_instance = true db_accessor_instance_class = "t3.2xlarge" create_elk_instance = true elk_instance_class = "t3.2xlarge" -sqs_queue_name = "crossfeed-staging-worker-queue" diff --git a/infrastructure/vars.tf b/infrastructure/vars.tf index b80769e84..c3d05237b 100644 --- a/infrastructure/vars.tf +++ b/infrastructure/vars.tf @@ -429,10 +429,3 @@ variable "create_elk_instance" { type = bool default = false } - -variable "sqs_queue_name" { - description = "sqs_queue_name" - type = string - default = "crossfeed-staging-worker-queue" -} - From 0c6913bf05df02ce49df606b75d911e9bfa114ef Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 15:57:14 -0400 Subject: [PATCH 25/42] Fix reference to SQS queue --- backend/serverless.yml | 2 +- backend/src/tasks/functions.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/backend/serverless.yml b/backend/serverless.yml index c8e4f94d3..dcd57fd65 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -81,7 +81,7 @@ provider: resources: Resources: - MySQSQueue: + WorkerQueue: Type: AWS::SQS::Queue Properties: QueueName: ${self:provider.stage}-worker-queue diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index cc373c320..504dd17c1 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -32,11 +32,13 @@ makeGlobalAdmin: scanExecution: handler: src/tasks/scanExecution.handler timeout: 300 # 5 minutes + environment: + SQS_QUEUE_NAME: ${self:provider.stage}-worker-queue events: - sqs: arn: Fn::GetAtt: - - ${self:provider.stage}-worker-queue + - WorkerQueue - Arn batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running From acecca14bd29c184e44ce891cff1923e6d657879 Mon Sep 17 00:00:00 2001 From: aloftus23 <79927030+aloftus23@users.noreply.github.com> Date: Wed, 4 Oct 2023 16:15:17 -0400 Subject: [PATCH 26/42] Fix SQS/Lambda Mapping (#2285) * Fix reference to SQS queue --- backend/serverless.yml | 2 +- backend/src/tasks/functions.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/backend/serverless.yml b/backend/serverless.yml index c8e4f94d3..dcd57fd65 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -81,7 +81,7 @@ provider: resources: Resources: - MySQSQueue: + WorkerQueue: Type: AWS::SQS::Queue Properties: QueueName: ${self:provider.stage}-worker-queue diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index cc373c320..504dd17c1 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -32,11 +32,13 @@ makeGlobalAdmin: scanExecution: handler: src/tasks/scanExecution.handler timeout: 300 # 5 minutes + environment: + SQS_QUEUE_NAME: ${self:provider.stage}-worker-queue events: - sqs: arn: Fn::GetAtt: - - ${self:provider.stage}-worker-queue + - WorkerQueue - Arn batchSize: 5 # 
Number of messages the lambda can continue to process while a Fargate is still running From 651d46e830ddef957923c044edf88d857b16c28f Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 16:33:28 -0400 Subject: [PATCH 27/42] Add visibility timeout to SQS --- backend/src/tasks/functions.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index 504dd17c1..be638ff86 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -41,6 +41,7 @@ scanExecution: - WorkerQueue - Arn batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running + visibilityTimeout: 300 # Should match or exceed function timeout updateScanTaskStatus: handler: src/tasks/updateScanTaskStatus.handler From ede6b97272175089a4ad4fffe976f85820f97773 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 4 Oct 2023 16:49:11 -0400 Subject: [PATCH 28/42] Add other properties --- backend/serverless.yml | 3 +++ backend/src/tasks/functions.yml | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/serverless.yml b/backend/serverless.yml index dcd57fd65..defa5aa9f 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -85,6 +85,9 @@ resources: Type: AWS::SQS::Queue Properties: QueueName: ${self:provider.stage}-worker-queue + VisibilityTimeout: 300 # Should match or exceed function timeout + MaximumMessageSize: 262144 # 256 KB + MessageRetentionPeriod: 604800 # 7 days functions: - ${file(./src/tasks/functions.yml)} diff --git a/backend/src/tasks/functions.yml b/backend/src/tasks/functions.yml index be638ff86..504dd17c1 100644 --- a/backend/src/tasks/functions.yml +++ b/backend/src/tasks/functions.yml @@ -41,7 +41,6 @@ scanExecution: - WorkerQueue - Arn batchSize: 5 # Number of messages the lambda can continue to process while a Fargate is still running - visibilityTimeout: 300 # Should match or exceed function timeout updateScanTaskStatus: handler: src/tasks/updateScanTaskStatus.handler From e5f0654a3ba2afea0c68d1c3274ba95708917f97 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Thu, 5 Oct 2023 10:19:58 -0400 Subject: [PATCH 29/42] Add handler and more logging to SQS lambda --- backend/src/tasks/scanExecution.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index ded5194b9..c3d959984 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -1,14 +1,17 @@ -import { SQSEvent, SQSRecord } from 'aws-lambda'; +import { Handler, SQSRecord } from 'aws-lambda'; import * as AWS from 'aws-sdk'; const ecs = new AWS.ECS(); const sqs = new AWS.SQS(); -export const invokeFargateTask = async (event: SQSEvent): Promise => { +export const handler: Handler = async (event) => { try { + // Get the SQS record and message body const sqsRecord: SQSRecord = event.Records[0]; const commandOptions: string = sqsRecord.body; + console.log(commandOptions); + // Get the ARN of the SQS queue from the event const sqsQueueArn: string | undefined = sqsRecord.eventSourceARN; @@ -24,11 +27,12 @@ export const invokeFargateTask = async (event: SQSEvent): Promise => { .getQueueAttributes(sqsQueue) .promise(); const sqsQueueUrl = queueAttributesResponse.Attributes?.QueueUrl; - + console.log(sqsQueueUrl); if (!sqsQueueUrl) { throw new Error('SQS Queue URL not found'); } + // Run command in queue message in Fargate const params: AWS.ECS.RunTaskRequest = { cluster: 
process.env.FARGATE_CLUSTER_NAME!, taskDefinition: process.env.FARGATE_TASK_DEFINITION_NAME!, @@ -50,7 +54,6 @@ export const invokeFargateTask = async (event: SQSEvent): Promise => { ] } }; - const data = await ecs.runTask(params).promise(); console.log('Fargate task started:', data); From e28e8725979b5c53235a80cf3d0037d8c3293ebf Mon Sep 17 00:00:00 2001 From: Matthew <106278637+Matthew-Grayson@users.noreply.github.com> Date: Thu, 5 Oct 2023 09:44:25 -0500 Subject: [PATCH 30/42] Add additional logging to cloudWatchToS3 lambda for troubleshooting. (#2272) * Add additional logging to cloudWatchToS3 lambda for troubleshooting. * Add permissions to lambda role to access ssm parameters and bucket acls. * Add resource to lambda role permissions. --- backend/serverless.yml | 10 ++++++++++ backend/src/tasks/cloudwatchToS3.ts | 11 ++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/backend/serverless.yml b/backend/serverless.yml index defa5aa9f..4f8a3f317 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -56,6 +56,8 @@ provider: - s3:GetObjectAcl - s3:PutObject - s3:PutObjectAcl + - s3:PutBucketAcl + - s3:GetBucketAcl Resource: '*' - Effect: Allow Action: @@ -78,6 +80,14 @@ provider: - logs:StartLiveTail - logs:StopLiveTail Resource: '*' + - Effect: Allow + Action: + - ssm:DescribeParameters + - ssm:GetParameter + - ssm:GetParameters + - ssm:GetParametersByPath + - ssm:PutParameter + Resource: '*' resources: Resources: diff --git a/backend/src/tasks/cloudwatchToS3.ts b/backend/src/tasks/cloudwatchToS3.ts index 7c106cb36..7364cecb2 100644 --- a/backend/src/tasks/cloudwatchToS3.ts +++ b/backend/src/tasks/cloudwatchToS3.ts @@ -34,8 +34,9 @@ export const handler = async () => { while (true) { const response = await logs.send(new DescribeLogGroupsCommand(extra_args)); + console.log(`response: ${JSON.stringify(response)}`); log_groups = log_groups.concat(response.logGroups!); - + console.log(`log_groups: ${JSON.stringify(log_groups)}`); if (!response.nextToken) { break; } @@ -46,16 +47,23 @@ export const handler = async () => { const command = new ListTagsForResourceCommand({ resourceArn: `arn:aws:logs:${region}:${accountId}:log-group:${log_group.logGroupName}` }); + console.log(`Processing log group: ${log_group.logGroupName}`); + console.log(`command: ${JSON.stringify(command)}`); const response = await logs.send(command); + console.log(`log group response: ${JSON.stringify(response)}`); const log_group_tags = response.tags || {}; if (log_group_tags.ExportToS3 === 'true') { log_groups_to_export.push(log_group.logGroupName!); } + console.log( + `log_groups_to_export: ${JSON.stringify(log_groups_to_export)}` + ); await delay(10 * 1000); // prevents LimitExceededException (AWS allows only one export task at a time) } for (const log_group_name of log_groups_to_export) { + console.log('Processing log group: ' + log_group_name); const ssm_parameter_name = ( '/log-exporter-last-export/' + log_group_name ).replace('//', '/'); @@ -70,6 +78,7 @@ export const handler = async () => { if (error.name !== 'ParameterNotFound') { console.error('Error fetching SSM parameter: ' + error.message); } + console.error(`error: ${error.message}`); } const export_to_time = Math.round(Date.now()); From 1e5853eb17fb76b6cfe459427dab1af0bc84579f Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Thu, 5 Oct 2023 12:37:30 -0400 Subject: [PATCH 31/42] Create test for scanExecution lambda --- backend/env.yml | 2 + backend/src/tasks/scanExecution.ts | 18 +-------- 
backend/src/tasks/test/scanExecution.test.ts | 39 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 16 deletions(-) create mode 100644 backend/src/tasks/test/scanExecution.test.ts diff --git a/backend/env.yml b/backend/env.yml index 4a922725a..851e241ac 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -42,6 +42,7 @@ staging: PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-staging-cloudwatch + SQS_QUEUE_URL: { Ref: WorkerQueue } prod: DB_DIALECT: 'postgres' @@ -78,6 +79,7 @@ prod: PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-prod-cloudwatch + SQS_QUEUE_URL: { Ref: WorkerQueue } dev-vpc: securityGroupIds: diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index c3d959984..658a447d0 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -12,23 +12,9 @@ export const handler: Handler = async (event) => { console.log(commandOptions); - // Get the ARN of the SQS queue from the event - const sqsQueueArn: string | undefined = sqsRecord.eventSourceARN; - - if (!sqsQueueArn) { - throw new Error('SQS Queue ARN not found in event'); - } - - // Describe the SQS queue to get its URL - const sqsQueue = { - QueueUrl: sqsQueueArn // Use the ARN as the QueueUrl - }; - const queueAttributesResponse = await sqs - .getQueueAttributes(sqsQueue) - .promise(); - const sqsQueueUrl = queueAttributesResponse.Attributes?.QueueUrl; + const sqsQueueUrl = process.env.SQS_QUEUE_URL!; console.log(sqsQueueUrl); - if (!sqsQueueUrl) { + if (sqsQueueUrl) { throw new Error('SQS Queue URL not found'); } diff --git a/backend/src/tasks/test/scanExecution.test.ts b/backend/src/tasks/test/scanExecution.test.ts new file mode 100644 index 000000000..ea4942d33 --- /dev/null +++ b/backend/src/tasks/test/scanExecution.test.ts @@ -0,0 +1,39 @@ +import { handler } from '../scanExecution'; +import { SQSRecord } from 'aws-lambda'; + +// Mock the AWS SDK methods using aws-sdk-mock +jest.mock('aws-sdk', () => { + return { + ECS: jest.fn(() => ({ + runTask: jest.fn().mockReturnThis(), + promise: jest.fn() + })), + SQS: jest.fn(() => ({ + sendMessage: jest.fn().mockReturnThis(), + promise: jest.fn() + })) + }; +}); + +describe('Scan Execution', () => { + it('should handle the event', async () => { + const event = { + Records: [ + { + body: 'test command', + eventSourceARN: 'YourSQSQueueARN' + } as SQSRecord + ] + }; + + const context = {} as any; + const callback = () => void 0; + const result = await handler(event, context, callback); + + // Add your assertions here + expect(result.statusCode).toEqual(200); + expect(result.body).toContain( + 'Fargate task started and message sent to SQS queue' + ); + }); +}); From b30bc630362aa84d02795f79a703ae4188319961 Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Thu, 5 Oct 2023 12:59:05 -0400 Subject: [PATCH 32/42] Fix formatting --- backend/src/tasks/test/scanExecution.test.ts | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/backend/src/tasks/test/scanExecution.test.ts b/backend/src/tasks/test/scanExecution.test.ts index ea4942d33..d6dc10a27 100644 --- a/backend/src/tasks/test/scanExecution.test.ts +++ b/backend/src/tasks/test/scanExecution.test.ts @@ -21,14 +21,12 @@ describe('Scan Execution', () => { Records: [ { body: 'test command', - eventSourceARN: 'YourSQSQueueARN' + eventSourceARN: 'SQSQueueARN' } as SQSRecord ] 
}; - const context = {} as any; - const callback = () => void 0; - const result = await handler(event, context, callback); + const result = await handler(event, {} as any, () => void 0); // Add your assertions here expect(result.statusCode).toEqual(200); From 5dfb3e69be881d9bdb16b8e1eed7dc581fa6e664 Mon Sep 17 00:00:00 2001 From: aloftus23 <79927030+aloftus23@users.noreply.github.com> Date: Thu, 5 Oct 2023 13:41:13 -0400 Subject: [PATCH 33/42] Create test for scanExecution lambda (#2289) * Create test for scanExecution lambda * Fix formatting --- backend/env.yml | 2 ++ backend/src/tasks/scanExecution.ts | 18 ++-------- backend/src/tasks/test/scanExecution.test.ts | 37 ++++++++++++++++++++ 3 files changed, 41 insertions(+), 16 deletions(-) create mode 100644 backend/src/tasks/test/scanExecution.test.ts diff --git a/backend/env.yml b/backend/env.yml index 4a922725a..851e241ac 100644 --- a/backend/env.yml +++ b/backend/env.yml @@ -42,6 +42,7 @@ staging: PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-staging-reports CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-staging-cloudwatch + SQS_QUEUE_URL: { Ref: WorkerQueue } prod: DB_DIALECT: 'postgres' @@ -78,6 +79,7 @@ prod: PE_API_URL: ${ssm:/crossfeed/staging/PE_API_URL} REPORTS_BUCKET_NAME: cisa-crossfeed-prod-reports CLOUDWATCH_BUCKET_NAME: cisa-crossfeed-prod-cloudwatch + SQS_QUEUE_URL: { Ref: WorkerQueue } dev-vpc: securityGroupIds: diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index c3d959984..658a447d0 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -12,23 +12,9 @@ export const handler: Handler = async (event) => { console.log(commandOptions); - // Get the ARN of the SQS queue from the event - const sqsQueueArn: string | undefined = sqsRecord.eventSourceARN; - - if (!sqsQueueArn) { - throw new Error('SQS Queue ARN not found in event'); - } - - // Describe the SQS queue to get its URL - const sqsQueue = { - QueueUrl: sqsQueueArn // Use the ARN as the QueueUrl - }; - const queueAttributesResponse = await sqs - .getQueueAttributes(sqsQueue) - .promise(); - const sqsQueueUrl = queueAttributesResponse.Attributes?.QueueUrl; + const sqsQueueUrl = process.env.SQS_QUEUE_URL!; console.log(sqsQueueUrl); - if (!sqsQueueUrl) { + if (sqsQueueUrl) { throw new Error('SQS Queue URL not found'); } diff --git a/backend/src/tasks/test/scanExecution.test.ts b/backend/src/tasks/test/scanExecution.test.ts new file mode 100644 index 000000000..d6dc10a27 --- /dev/null +++ b/backend/src/tasks/test/scanExecution.test.ts @@ -0,0 +1,37 @@ +import { handler } from '../scanExecution'; +import { SQSRecord } from 'aws-lambda'; + +// Mock the AWS SDK methods using aws-sdk-mock +jest.mock('aws-sdk', () => { + return { + ECS: jest.fn(() => ({ + runTask: jest.fn().mockReturnThis(), + promise: jest.fn() + })), + SQS: jest.fn(() => ({ + sendMessage: jest.fn().mockReturnThis(), + promise: jest.fn() + })) + }; +}); + +describe('Scan Execution', () => { + it('should handle the event', async () => { + const event = { + Records: [ + { + body: 'test command', + eventSourceARN: 'SQSQueueARN' + } as SQSRecord + ] + }; + + const result = await handler(event, {} as any, () => void 0); + + // Add your assertions here + expect(result.statusCode).toEqual(200); + expect(result.body).toContain( + 'Fargate task started and message sent to SQS queue' + ); + }); +}); From 8fd67880c225b034cc2dcfabb54e60e85905b36d Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Thu, 5 Oct 2023 15:03:47 
-0400 Subject: [PATCH 34/42] Fix Lambda function throw error --- backend/src/tasks/scanExecution.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index 658a447d0..163d9e6d6 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -14,7 +14,7 @@ export const handler: Handler = async (event) => { const sqsQueueUrl = process.env.SQS_QUEUE_URL!; console.log(sqsQueueUrl); - if (sqsQueueUrl) { + if (!sqsQueueUrl) { throw new Error('SQS Queue URL not found'); } From bbf81aae793dcb036254f951473931a32b8e9d3b Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Thu, 5 Oct 2023 15:09:25 -0400 Subject: [PATCH 35/42] Fix throw error in SQS lambda --- backend/src/tasks/scanExecution.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index 658a447d0..163d9e6d6 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -14,7 +14,7 @@ export const handler: Handler = async (event) => { const sqsQueueUrl = process.env.SQS_QUEUE_URL!; console.log(sqsQueueUrl); - if (sqsQueueUrl) { + if (!sqsQueueUrl) { throw new Error('SQS Queue URL not found'); } From a98421215001c6e3e6c859d23e4fb7a92423d339 Mon Sep 17 00:00:00 2001 From: aloftus23 <79927030+aloftus23@users.noreply.github.com> Date: Thu, 5 Oct 2023 15:10:40 -0400 Subject: [PATCH 36/42] Fix throw error in scanExecution Lambda (#2290) * Fix throw error in SQS lambda --- backend/src/tasks/scanExecution.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index 658a447d0..163d9e6d6 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -14,7 +14,7 @@ export const handler: Handler = async (event) => { const sqsQueueUrl = process.env.SQS_QUEUE_URL!; console.log(sqsQueueUrl); - if (sqsQueueUrl) { + if (!sqsQueueUrl) { throw new Error('SQS Queue URL not found'); } From 5cce17d2ba7a641df0ecc4207c65c2ecc20973bc Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Thu, 5 Oct 2023 21:50:17 -0400 Subject: [PATCH 37/42] Fix scanExecution test --- backend/src/tasks/test/scanExecution.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/src/tasks/test/scanExecution.test.ts b/backend/src/tasks/test/scanExecution.test.ts index d6dc10a27..781c604c3 100644 --- a/backend/src/tasks/test/scanExecution.test.ts +++ b/backend/src/tasks/test/scanExecution.test.ts @@ -16,6 +16,7 @@ jest.mock('aws-sdk', () => { }); describe('Scan Execution', () => { + process.env.SQS_QUEUE_URL = 'YOUR_SQS_QUEUE_URL'; it('should handle the event', async () => { const event = { Records: [ From f1ad663b3b242be3a1d95c1ed746c4127620eecb Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 11 Oct 2023 08:38:20 -0400 Subject: [PATCH 38/42] Remove Start processing message back --- backend/src/tasks/scanExecution.ts | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index 163d9e6d6..1eec806bc 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -12,12 +12,6 @@ export const handler: Handler = async (event) => { console.log(commandOptions); - const sqsQueueUrl = process.env.SQS_QUEUE_URL!; - console.log(sqsQueueUrl); - if (!sqsQueueUrl) { - throw new Error('SQS Queue URL not found'); - } - // Run command in queue message in 
Fargate const params: AWS.ECS.RunTaskRequest = { cluster: process.env.FARGATE_CLUSTER_NAME!, @@ -43,13 +37,6 @@ export const handler: Handler = async (event) => { const data = await ecs.runTask(params).promise(); console.log('Fargate task started:', data); - // Send a message to the SQS queue to trigger processing - const sqsParams: AWS.SQS.SendMessageRequest = { - MessageBody: 'Start processing...', - QueueUrl: sqsQueueUrl - }; - await sqs.sendMessage(sqsParams).promise(); - return { statusCode: 200, body: JSON.stringify('Fargate task started and message sent to SQS queue') From 4d5b1abaca81b8c1fc5171532f519c6533bff2dd Mon Sep 17 00:00:00 2001 From: aloftus23 Date: Wed, 11 Oct 2023 08:58:09 -0400 Subject: [PATCH 39/42] add shodan test script --- backend/src/tasks/scanExecution.ts | 10 ++++++++-- backend/worker/shodan.sh | 11 +++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 backend/worker/shodan.sh diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts index 1eec806bc..f6c1eb881 100644 --- a/backend/src/tasks/scanExecution.ts +++ b/backend/src/tasks/scanExecution.ts @@ -8,10 +8,16 @@ export const handler: Handler = async (event) => { try { // Get the SQS record and message body const sqsRecord: SQSRecord = event.Records[0]; - const commandOptions: string = sqsRecord.body; + const body: string = sqsRecord.body; - console.log(commandOptions); + console.log(body); + let commandOptions; + if (body === 'SHODAN') { + commandOptions = './worker/shodan.sh'; + } else { + commandOptions = body; + } // Run command in queue message in Fargate const params: AWS.ECS.RunTaskRequest = { cluster: process.env.FARGATE_CLUSTER_NAME!, diff --git a/backend/worker/shodan.sh b/backend/worker/shodan.sh new file mode 100644 index 000000000..d73535fa8 --- /dev/null +++ b/backend/worker/shodan.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +cd /app/pe-reports + +echo "Starting Shodan" + +pe-source shodan --orgs=DHS --soc_med_included + +echo "Done" \ No newline at end of file From d8741fe04ca5d285f2363ddeebd5ef31f74b88b2 Mon Sep 17 00:00:00 2001 From: Matthew <106278637+Matthew-Grayson@users.noreply.github.com> Date: Thu, 12 Oct 2023 10:52:49 -0500 Subject: [PATCH 40/42] Fix cloudwatchToS3 Lambda (#2302) * Refactor lambda to export all logs (not just those with ExportToS3 tag); refactor variable names for consistency with project. * Remove console logs used for troubleshooting. 
--- backend/src/tasks/cloudwatchToS3.ts | 102 ++++++++++------------------ 1 file changed, 35 insertions(+), 67 deletions(-) diff --git a/backend/src/tasks/cloudwatchToS3.ts b/backend/src/tasks/cloudwatchToS3.ts index 7364cecb2..e7452be6c 100644 --- a/backend/src/tasks/cloudwatchToS3.ts +++ b/backend/src/tasks/cloudwatchToS3.ts @@ -3,7 +3,6 @@ import { DescribeLogGroupsCommand, DescribeLogGroupsRequest, LogGroup, - ListTagsForResourceCommand, CreateExportTaskCommand } from '@aws-sdk/client-cloudwatch-logs'; import { @@ -14,66 +13,42 @@ import { const logs = new CloudWatchLogsClient({}); const ssm = new SSMClient({}); -const region = process.env.AWS_REGION || 'us-east-1'; -const accountId = process.env.AWS_ACCOUNT_ID || '957221700844'; const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); export const handler = async () => { - const extra_args: DescribeLogGroupsRequest = {}; - let log_groups: LogGroup[] = []; - const log_groups_to_export: string[] = []; + const args: DescribeLogGroupsRequest = {}; + let logGroups: LogGroup[] = []; + const logBucketName = process.env.CLOUDWATCH_BUCKET_NAME; - if (!process.env.CLOUDWATCH_BUCKET_NAME) { - console.error('Error: CLOUDWATCH_BUCKET_NAME not defined'); + if (!logBucketName) { + console.error('Error: logBucketName not defined'); return; } - console.log( - '--> CLOUDWATCH_BUCKET_NAME=' + process.env.CLOUDWATCH_BUCKET_NAME - ); + console.log('--> logBucketName=' + logBucketName); while (true) { - const response = await logs.send(new DescribeLogGroupsCommand(extra_args)); - console.log(`response: ${JSON.stringify(response)}`); - log_groups = log_groups.concat(response.logGroups!); - console.log(`log_groups: ${JSON.stringify(log_groups)}`); + const response = await logs.send(new DescribeLogGroupsCommand(args)); + logGroups = logGroups.concat(response.logGroups!); if (!response.nextToken) { break; } - extra_args.nextToken = response.nextToken; + args.nextToken = response.nextToken; } - for (const log_group of log_groups) { - const command = new ListTagsForResourceCommand({ - resourceArn: `arn:aws:logs:${region}:${accountId}:log-group:${log_group.logGroupName}` - }); - console.log(`Processing log group: ${log_group.logGroupName}`); - console.log(`command: ${JSON.stringify(command)}`); - const response = await logs.send(command); - console.log(`log group response: ${JSON.stringify(response)}`); - const log_group_tags = response.tags || {}; - - if (log_group_tags.ExportToS3 === 'true') { - log_groups_to_export.push(log_group.logGroupName!); - } - console.log( - `log_groups_to_export: ${JSON.stringify(log_groups_to_export)}` - ); - await delay(10 * 1000); // prevents LimitExceededException (AWS allows only one export task at a time) - } - - for (const log_group_name of log_groups_to_export) { - console.log('Processing log group: ' + log_group_name); - const ssm_parameter_name = ( - '/log-exporter-last-export/' + log_group_name + for (const logGroup of logGroups) { + const logGroupName = logGroup.logGroupName!; + console.log('Processing log group: ' + logGroupName); + const ssmParameterName = ( + '/log-exporter-last-export/' + logGroupName ).replace('//', '/'); - let ssm_value = '0'; + let ssmValue = '0'; try { - const ssm_response = await ssm.send( - new GetParameterCommand({ Name: ssm_parameter_name }) + const ssmResponse = await ssm.send( + new GetParameterCommand({ Name: ssmParameterName }) ); - ssm_value = ssm_response.Parameter?.Value || '0'; + ssmValue = ssmResponse.Parameter?.Value || '0'; } catch (error) { if (error.name 
!== 'ParameterNotFound') { console.error('Error fetching SSM parameter: ' + error.message); @@ -81,31 +56,25 @@ export const handler = async () => { console.error(`error: ${error.message}`); } - const export_to_time = Math.round(Date.now()); + const exportTime = Math.round(Date.now()); - console.log( - '--> Exporting ' + - log_group_name + - ' to ' + - process.env.CLOUDWATCH_BUCKET_NAME - ); + console.log('--> Exporting ' + logGroupName + ' to ' + logBucketName); - if (export_to_time - parseInt(ssm_value) < 24 * 60 * 60 * 1000) { - // Haven't been 24hrs from the last export of this log group - console.log(' Skipped until 24hrs from last export is completed'); + if (exportTime - parseInt(ssmValue) < 24 * 60 * 60 * 1000) { + console.log( + 'Skipped: log group was already exported in the last 24 hours' + ); continue; } try { const response = await logs.send( new CreateExportTaskCommand({ - logGroupName: log_group_name, - from: parseInt(ssm_value), - to: export_to_time, - destination: process.env.CLOUDWATCH_BUCKET_NAME, - destinationPrefix: log_group_name - .replace(/^\//, '') - .replace(/\/$/, '') + logGroupName: logGroupName, + from: parseInt(ssmValue), + to: exportTime, + destination: logBucketName, + destinationPrefix: logGroupName.replace(/^\//, '').replace(/\/$/, '') }) ); @@ -113,14 +82,12 @@ export const handler = async () => { await new Promise((resolve) => setTimeout(resolve, 5000)); } catch (error) { if (error.name === 'LimitExceededException') { - console.log( - ' Need to wait until all tasks are finished (LimitExceededException). Continuing later...' - ); + console.log(error.message); return; } console.error( - ' Error exporting ' + - log_group_name + + 'Error exporting ' + + logGroupName + ': ' + (error.message || JSON.stringify(error)) ); @@ -129,11 +96,12 @@ export const handler = async () => { await ssm.send( new PutParameterCommand({ - Name: ssm_parameter_name, + Name: ssmParameterName, Type: 'String', - Value: export_to_time.toString(), + Value: exportTime.toString(), Overwrite: true }) ); } + await delay(10 * 1000); // prevents LimitExceededException (AWS allows only one export task at a time) }; From b8f271720a3bd7f4088c732d3fbd535ef2d1483b Mon Sep 17 00:00:00 2001 From: Matthew <106278637+Matthew-Grayson@users.noreply.github.com> Date: Thu, 12 Oct 2023 15:03:17 -0500 Subject: [PATCH 41/42] Cloudwatch to s3 lambda: Add log export/stream permissions to lambda role (#2303) * Add cloudwatch permissions to lambda role; remove redundant logs:GetLogEvents. --- backend/serverless.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/serverless.yml b/backend/serverless.yml index 4f8a3f317..eca3425e4 100644 --- a/backend/serverless.yml +++ b/backend/serverless.yml @@ -44,7 +44,6 @@ provider: - ecs:RunTask - ecs:ListTasks - iam:PassRole - - logs:GetLogEvents Resource: '*' - Effect: Allow Action: @@ -70,9 +69,12 @@ provider: Resource: '*' - Effect: Allow Action: + - logs:CreateExportTask + - logs:CreateLogStream - logs:Describe* - logs:Get* - logs:List* + - logs:PutLogEvents - logs:StartQuery - logs:StopQuery - logs:TestMetricFilter From 2e6cdc586f246a81603e130259e2ee02c2ccc8d8 Mon Sep 17 00:00:00 2001 From: Matthew <106278637+Matthew-Grayson@users.noreply.github.com> Date: Fri, 13 Oct 2023 13:39:35 -0500 Subject: [PATCH 42/42] Cloudwatch to s3 lambda (#2305) * Add aws_s3_bucket_logging resource so that changes to cloudwatch_bucket are saved to logging bucket. * Update lambda to export based on log group's stage tag. * Add additional console and error logs. 
* Add todo for delay. --- backend/src/tasks/cloudwatchToS3.ts | 68 ++++++++++++++++++----------- infrastructure/cloudwatch.tf | 6 +++ 2 files changed, 49 insertions(+), 25 deletions(-) diff --git a/backend/src/tasks/cloudwatchToS3.ts b/backend/src/tasks/cloudwatchToS3.ts index e7452be6c..d715b20c3 100644 --- a/backend/src/tasks/cloudwatchToS3.ts +++ b/backend/src/tasks/cloudwatchToS3.ts @@ -2,6 +2,7 @@ import { CloudWatchLogsClient, DescribeLogGroupsCommand, DescribeLogGroupsRequest, + ListTagsForResourceCommand, LogGroup, CreateExportTaskCommand } from '@aws-sdk/client-cloudwatch-logs'; @@ -19,46 +20,64 @@ export const handler = async () => { const args: DescribeLogGroupsRequest = {}; let logGroups: LogGroup[] = []; const logBucketName = process.env.CLOUDWATCH_BUCKET_NAME; + const stage = process.env.STAGE; - if (!logBucketName) { - console.error('Error: logBucketName not defined'); + console.log(`logBucketName=${logBucketName}, stage=${stage}`); + + if (!logBucketName || !stage) { + console.error(`Error: logBucketName or stage not defined`); return; } - console.log('--> logBucketName=' + logBucketName); - while (true) { - const response = await logs.send(new DescribeLogGroupsCommand(args)); - logGroups = logGroups.concat(response.logGroups!); - if (!response.nextToken) { + const describeLogGroupsResponse = await logs.send( + new DescribeLogGroupsCommand(args) + ); + logGroups = logGroups.concat(describeLogGroupsResponse.logGroups!); + if (!describeLogGroupsResponse.nextToken) { break; } - args.nextToken = response.nextToken; + args.nextToken = describeLogGroupsResponse.nextToken; } for (const logGroup of logGroups) { + const listTagsResponse = await logs.send( + new ListTagsForResourceCommand({ + resourceArn: logGroup.arn + }) + ); + console.log(`listTagsResponse: ${JSON.stringify(listTagsResponse)}`); + const logGroupTags = listTagsResponse.tags || {}; + if (logGroupTags.Stage !== stage) { + console.log( + `Skipping log group: ${logGroup.logGroupName} (no ${stage} tag)` + ); + continue; + } const logGroupName = logGroup.logGroupName!; - console.log('Processing log group: ' + logGroupName); - const ssmParameterName = ( - '/log-exporter-last-export/' + logGroupName - ).replace('//', '/'); + console.log(`Processing log group: ${logGroupName}`); + const ssmParameterName = `last-export-to-s3/${logGroupName}`.replace( + '//', + '/' + ); let ssmValue = '0'; try { const ssmResponse = await ssm.send( new GetParameterCommand({ Name: ssmParameterName }) ); + console.log(`ssmResponse: ${JSON.stringify(ssmResponse)}`); ssmValue = ssmResponse.Parameter?.Value || '0'; } catch (error) { if (error.name !== 'ParameterNotFound') { - console.error('Error fetching SSM parameter: ' + error.message); + console.error(`Error fetching SSM parameter: ${JSON.stringify(error)}`); } - console.error(`error: ${error.message}`); + console.error(`ssm.send error: ${JSON.stringify(error)}`); } const exportTime = Math.round(Date.now()); - console.log('--> Exporting ' + logGroupName + ' to ' + logBucketName); + console.log(`--> Exporting ${logGroupName} to ${logBucketName}`); if (exportTime - parseInt(ssmValue) < 24 * 60 * 60 * 1000) { console.log( @@ -68,28 +87,25 @@ export const handler = async () => { } try { - const response = await logs.send( + const exportTaskResponse = await logs.send( new CreateExportTaskCommand({ logGroupName: logGroupName, from: parseInt(ssmValue), to: exportTime, destination: logBucketName, - destinationPrefix: logGroupName.replace(/^\//, '').replace(/\/$/, '') + destinationPrefix: 
logGroupName.replace(/^\/|\/$/g, '') }) ); - - console.log(' Task created: ' + response.taskId); + console.log(`exportTaskResponse: ${JSON.stringify(exportTaskResponse)}`); + console.log(`Task created: ${exportTaskResponse.taskId}`); await new Promise((resolve) => setTimeout(resolve, 5000)); } catch (error) { if (error.name === 'LimitExceededException') { - console.log(error.message); + console.log(JSON.stringify(error)); return; } console.error( - 'Error exporting ' + - logGroupName + - ': ' + - (error.message || JSON.stringify(error)) + `Error exporting ${logGroupName}: ${JSON.stringify(error)}` ); continue; } @@ -102,6 +118,8 @@ export const handler = async () => { Overwrite: true }) ); + console.log(`SSM parameter updated: ${ssmParameterName}`); } - await delay(10 * 1000); // prevents LimitExceededException (AWS allows only one export task at a time) + // TODO: reevaluate the delay time after the first set of exports + await delay(30 * 1000); // mitigates LimitExceededException (AWS allows only one export task at a time) }; diff --git a/infrastructure/cloudwatch.tf b/infrastructure/cloudwatch.tf index 848f97e93..355b398af 100644 --- a/infrastructure/cloudwatch.tf +++ b/infrastructure/cloudwatch.tf @@ -65,4 +65,10 @@ resource "aws_s3_bucket_policy" "cloudwatch_bucket" { } ] }) +} + +resource "aws_s3_bucket_logging" "cloudwatch_bucket" { + bucket = aws_s3_bucket.cloudwatch_bucket.id + target_bucket = aws_s3_bucket.logging_bucket.id + target_prefix = "cloudwatch_bucket" } \ No newline at end of file
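
A brief usage sketch to round out the series: once the stage-scoped worker queue exists and the scanExecution handler is subscribed to it, any producer with sqs:SendMessage (for example the DB accessor EC2 instance covered by the sqs_send_message_policy added earlier) can trigger a Fargate run by sending a message whose body is either 'SHODAN' (mapped to ./worker/shodan.sh) or a raw command string. The snippet below is a minimal sketch and is not part of the patch series; the SQS_QUEUE_URL environment variable and the enqueueScan helper name are assumptions, and it uses the same aws-sdk v2 client style as scanExecution.ts.

// enqueueScan.ts - hypothetical producer, not included in this patch series.
// Assumes SQS_QUEUE_URL points at the <stage>-worker-queue defined in serverless.yml.
import * as AWS from 'aws-sdk';

const sqs = new AWS.SQS({ region: process.env.AWS_REGION || 'us-east-1' });

export const enqueueScan = async (command: string): Promise<string> => {
  const queueUrl = process.env.SQS_QUEUE_URL;
  if (!queueUrl) {
    throw new Error('SQS_QUEUE_URL not set');
  }
  // The scanExecution handler reads the raw message body: 'SHODAN' is translated
  // to ./worker/shodan.sh, and anything else is passed straight through as the
  // container command override for the Fargate task.
  const result = await sqs
    .sendMessage({ QueueUrl: queueUrl, MessageBody: command })
    .promise();
  return result.MessageId!;
};

// Example: enqueueScan('SHODAN').then((id) => console.log('queued', id));

Note that the handler as written only inspects event.Records[0], so even with batchSize: 5 any additional records delivered in the same batch are not dispatched; sending one message per scan keeps the behavior predictable.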
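
Patch 42 also makes the log exporter skip any log group whose Stage tag does not match the running stage, so log groups must be tagged before they are exported. The sketch below is illustrative only (the patch series does not add it) and assumes the newer CloudWatch Logs resource-tagging API, the same family as the ListTagsForResourceCommand the exporter already uses; the ARN and stage values are placeholders.

// tagLogGroup.ts - illustrative only; not included in this patch series.
import {
  CloudWatchLogsClient,
  TagResourceCommand
} from '@aws-sdk/client-cloudwatch-logs';

const logs = new CloudWatchLogsClient({});

export const tagLogGroupForExport = async (
  logGroupArn: string, // e.g. arn:aws:logs:us-east-1:<account-id>:log-group:/aws/lambda/my-fn
  stage: string // must equal the STAGE value seen by the exporter, e.g. 'staging'
) => {
  // The exporter compares listTagsResponse.tags.Stage against process.env.STAGE
  // and skips the group when they differ, so the tag key must be exactly 'Stage'.
  // Tagging typically expects the log group ARN without a trailing ':*'.
  await logs.send(
    new TagResourceCommand({
      resourceArn: logGroupArn,
      tags: { Stage: stage }
    })
  );
};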