Minor tweaks and improvements #104

Merged (2 commits, Aug 3, 2023)
cdk-lib/viewer-stacks/viewer-nodes-stack.ts (3 additions, 3 deletions)
@@ -100,7 +100,7 @@ export class ViewerNodesStack extends cdk.Stack {
         const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {
             vpc: props.viewerVpc,
             internetFacing: true,
-            loadBalancerName: `${props.clusterName}-Viewer` // Receives a random suffix, which minimizes DNS collisions
+            loadBalancerName: `${props.clusterName}-Viewer`.toLowerCase() // Receives a random suffix, which minimizes DNS collisions
         });
         const listener = lb.addListener('Listener', {
             protocol: elbv2.ApplicationProtocol.HTTP,
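The lowercasing presumably keeps the load balancer name consistent with the all-lowercase DNS name AWS derives from it. As a minimal sketch, the same normalization expressed in Python, with the 32-character ALB name limit AWS enforces; the helper is hypothetical, not part of this repo:

# Hypothetical helper mirroring the change above (not part of this repo).
# ALB names allow only alphanumerics and hyphens, max 32 characters, and
# the DNS name AWS generates from them is lowercase.
def viewer_lb_name(cluster_name: str) -> str:
    name = f"{cluster_name}-Viewer".lower()
    if len(name) > 32:
        raise ValueError(f"ALB name '{name}' exceeds the 32-character limit")
    return name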
@@ -138,7 +138,7 @@ export class ViewerNodesStack extends cdk.Stack {
                 containerPort: viewerPort
             })],
             healthCheck: {
-                healthyHttpCodes: '200,401',
+                healthyHttpCodes: '200,302,401',
                 path: '/',
                 unhealthyThresholdCount: 2,
                 healthyThresholdCount: 5,
@@ -160,7 +160,7 @@ export class ViewerNodesStack extends cdk.Stack {
                 containerPort: viewerPort
             })],
             healthCheck: {
-                healthyHttpCodes: '200,401',
+                healthyHttpCodes: '200,302,401',
                 path: '/',
                 unhealthyThresholdCount: 2,
                 healthyThresholdCount: 5,
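Adding 302 to both target groups' healthy codes means a redirect from the Viewer's '/' (for example, to a login page) no longer flaps the ALB health check alongside the existing 200 and 401. As a hedged illustration, the equivalent matcher change applied to an existing target group out-of-band with boto3; the target group ARN is a placeholder, not from this repo:

import boto3

# Sketch only: apply the same healthy-code matcher with boto3.
elbv2 = boto3.client("elbv2")
elbv2.modify_target_group(
    TargetGroupArn="arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/viewer/0123456789abcdef",
    Matcher={"HttpCode": "200,302,401"},
)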
docker-viewer-node/run_viewer_node.sh (10 additions, 2 deletions)
@@ -26,7 +26,15 @@ source /bootstrap_config.sh
 source /initialize_arkime.sh
 
 # Start Arkime Viewer
-echo "Running Arkime Viewer process ..."
 cd /opt/arkime/viewer
 /opt/arkime/bin/node addUser.js $VIEWER_USER $VIEWER_USER $VIEWER_PASS --admin --packetSearch
-/opt/arkime/bin/node viewer.js -c /opt/arkime/etc/config.ini
+
+set +e # Don't exit the outer script if the viewer process dies
+while true; do
+    echo "Running Arkime Viewer process ..."
+
+    /opt/arkime/bin/node viewer.js -c /opt/arkime/etc/config.ini
+
+    echo "Arkime Viewer crashed with exit code $?. Restarting..."
+    sleep 1
+done
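The new loop turns the container entrypoint into a lightweight supervisor: set +e stops a non-zero exit from viewer.js from killing the script, and the one-second sleep keeps a crash loop from spinning at full speed. A rough Python equivalent of the same keep-alive pattern, for illustration only:

import subprocess
import time

# Rough Python equivalent of the shell keep-alive loop above (sketch only).
while True:
    print("Running Arkime Viewer process ...")
    result = subprocess.run(
        ["/opt/arkime/bin/node", "viewer.js", "-c", "/opt/arkime/etc/config.ini"],
        cwd="/opt/arkime/viewer",
    )
    print(f"Arkime Viewer exited with code {result.returncode}. Restarting...")
    time.sleep(1)  # brief pause so a crash loop doesn't spin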
manage_arkime.py (14 additions, 2 deletions)
@@ -161,11 +161,23 @@ def vpc_remove(ctx, cluster_name, vpc_id):
 
 @click.command(help="Updates specified Arkime Cluster's Capture/Viewer configuration")
 @click.option("--cluster-name", help="The name of the Arkime Cluster to update", required=True)
+@click.option("--force-bounce-capture",
+    help="Forces a bounce of the Capture Nodes, regardless of whether there is new config.",
+    is_flag=True,
+    show_default=True,
+    default=False
+)
+@click.option("--force-bounce-viewer",
+    help="Forces a bounce of the Viewer Nodes, regardless of whether there is new config.",
+    is_flag=True,
+    show_default=True,
+    default=False
+)
 @click.pass_context
-def config_update(ctx, cluster_name):
+def config_update(ctx, cluster_name, force_bounce_capture, force_bounce_viewer):
     profile = ctx.obj.get("profile")
     region = ctx.obj.get("region")
-    cmd_config_update(profile, region, cluster_name)
+    cmd_config_update(profile, region, cluster_name, force_bounce_capture, force_bounce_viewer)
 cli.add_command(config_update)
 
 def main():
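Both new options are plain boolean flags, so they default to off and take no value. A sketch of exercising one in-process with Click's test runner; it assumes the cli group defined in manage_arkime.py is importable (the script shares its name with the manage_arkime/ package, so the exact import path may differ), and the cluster name is a placeholder:

from click.testing import CliRunner

from manage_arkime import cli  # assumption: the cli group resolves via this path

# Equivalent to: manage_arkime.py config-update --cluster-name MyCluster --force-bounce-viewer
runner = CliRunner()
result = runner.invoke(cli, ["config-update", "--cluster-name", "MyCluster", "--force-bounce-viewer"])
print(result.output)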
manage_arkime/commands/config_update.py (3 additions, 3 deletions)
@@ -15,7 +15,7 @@
 
 logger = logging.getLogger(__name__)
 
-def cmd_config_update(profile: str, region: str, cluster_name: str):
+def cmd_config_update(profile: str, region: str, cluster_name: str, force_bounce_capture: bool, force_bounce_viewer: bool):
     logger.debug(f"Invoking config-update with profile '{profile}' and region '{region}'")
 
     # Update Capture/Viewer config in the cloud, if there's a new version locally. Bounce the associated ECS Tasks
@@ -33,7 +33,7 @@ def cmd_config_update(profile: str, region: str, cluster_name: str):
         config_wrangling.get_capture_config_archive,
         aws_provider
     )
-    if should_bounce_capture_nodes:
+    if should_bounce_capture_nodes or force_bounce_capture:
         raw_capture_details = ssm_ops.get_ssm_param_value(
             constants.get_capture_details_ssm_param_name(cluster_name),
             aws_provider
@@ -55,7 +55,7 @@ def cmd_config_update(profile: str, region: str, cluster_name: str):
         config_wrangling.get_viewer_config_archive,
         aws_provider
     )
-    if should_bounce_viewer_nodes:
+    if should_bounce_viewer_nodes or force_bounce_viewer:
         raw_viewer_details = ssm_ops.get_ssm_param_value(
             constants.get_viewer_details_ssm_param_name(cluster_name),
             aws_provider
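With the force flags OR'd in, the bounce now happens even when _update_config_if_necessary reports no new config. The _bounce_ecs_service helper itself is outside this diff; a typical implementation restarts the tasks by forcing a new ECS deployment, sketched here with boto3 under that assumption:

import boto3

# Sketch of a typical ECS "bounce" (the repo's _bounce_ecs_service is not
# shown in this diff): force a new deployment so the service's tasks are
# replaced and pick up the freshly uploaded config.
def bounce_ecs_service(cluster: str, service: str) -> None:
    ecs = boto3.client("ecs")
    ecs.update_service(cluster=cluster, service=service, forceNewDeployment=True)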
test_manage_arkime/commands/test_config_update.py (77 additions, 2 deletions)
@@ -38,7 +38,7 @@ def test_WHEN_cmd_config_update_called_AND_happy_path_THEN_as_expected(mock_prov
     ]
 
     # Run our test
-    cmd_config_update("profile", "region", cluster_name)
+    cmd_config_update("profile", "region", cluster_name, False, False)
 
     # Check our results
     expected_update_config_calls = [
@@ -109,7 +109,7 @@ def test_WHEN_cmd_config_update_called_AND_shouldnt_bounce_THEN_as_expected(mock
     mock_update_config.side_effect = [False, False]
 
     # Run our test
-    cmd_config_update("profile", "region", cluster_name)
+    cmd_config_update("profile", "region", cluster_name, False, False)
 
     # Check our results
     expected_update_config_calls = [
@@ -139,6 +139,81 @@ def test_WHEN_cmd_config_update_called_AND_shouldnt_bounce_THEN_as_expected(mock
     expected_bounce_calls = []
     assert expected_bounce_calls == mock_bounce.call_args_list
 
+@mock.patch("commands.config_update._bounce_ecs_service")
+@mock.patch("commands.config_update.ssm_ops.get_ssm_param_value")
+@mock.patch("commands.config_update._update_config_if_necessary")
+@mock.patch("commands.config_update.AwsClientProvider")
+def test_WHEN_cmd_config_update_called_AND_force_bounce_THEN_as_expected(mock_provider_cls, mock_update_config,
+                                                                         mock_get_param, mock_bounce):
+    # Set up our mock
+    aws_env = AwsEnvironment("XXXXXXXXXXXX", "region", "profile")
+    cluster_name = "cluster_name"
+    bucket_name = constants.get_config_bucket_name(aws_env.aws_account, aws_env.aws_region, cluster_name)
+
+    mock_provider = mock.Mock()
+    mock_provider.get_aws_env.return_value = aws_env
+    mock_provider_cls.return_value = mock_provider
+
+    mock_update_config.side_effect = [False, False]
+    mock_get_param.side_effect = [
+        '{"ecsCluster": "cluster-name-cap", "ecsService": "service-name-cap"}',
+        '{"dns": "dns-v", "ecsCluster": "cluster-name-v", "ecsService": "service-name-v", "passwordArn": "pass-arn", "user": "user-v"}',
+    ]
+
+    # Run our test
+    cmd_config_update("profile", "region", cluster_name, True, True)
+
+    # Check our results
+    expected_update_config_calls = [
+        mock.call(
+            cluster_name,
+            bucket_name,
+            constants.get_capture_config_s3_key,
+            constants.get_capture_config_details_ssm_param_name(cluster_name),
+            config_wrangling.get_capture_config_archive,
+            mock_provider
+        ),
+        mock.call(
+            cluster_name,
+            bucket_name,
+            constants.get_viewer_config_s3_key,
+            constants.get_viewer_config_details_ssm_param_name(cluster_name),
+            config_wrangling.get_viewer_config_archive,
+            mock_provider
+        ),
+    ]
+    assert expected_update_config_calls == mock_update_config.call_args_list
+
+    expected_get_param_calls = [
+        mock.call(
+            constants.get_capture_details_ssm_param_name(cluster_name),
+            mock_provider
+        ),
+        mock.call(
+            constants.get_viewer_details_ssm_param_name(cluster_name),
+            mock_provider
+        ),
+    ]
+    assert expected_get_param_calls == mock_get_param.call_args_list
+
+    expected_bounce_calls = [
+        mock.call(
+            "cluster-name-cap",
+            "service-name-cap",
+            constants.get_capture_config_details_ssm_param_name(cluster_name),
+            mock_provider
+        ),
+        mock.call(
+            "cluster-name-v",
+            "service-name-v",
+            constants.get_viewer_config_details_ssm_param_name(cluster_name),
+            mock_provider
+        ),
+    ]
+    assert expected_bounce_calls == mock_bounce.call_args_list
+
 @mock.patch("commands.config_update.ssm_ops.put_ssm_param")
 @mock.patch("commands.config_update.ssm_ops.get_ssm_param_value")
 def test_WHEN_revert_arkime_config_called_AND_happy_path_THEN_as_expected(mock_get_ssm_param, mock_put_ssm_param):
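One detail worth noting in the new test: stacked mock.patch decorators apply bottom-up, so the decorator closest to the function supplies the first mock argument (here, AwsClientProvider maps to mock_provider_cls). A minimal standalone illustration of that ordering rule, unrelated to this repo's code:

from unittest import mock

@mock.patch("os.remove")   # outermost -> last argument (mock_remove)
@mock.patch("os.getcwd")   # innermost -> first argument (mock_getcwd)
def check_ordering(mock_getcwd, mock_remove):
    # Decorators apply bottom-up, so os.getcwd maps to the first parameter.
    assert mock_getcwd is not mock_remove

check_ordering()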
Expand Down