Commit 3333172: successfully ran tests

razvan committed Dec 11, 2024 · 1 parent 4025aa7

Showing 5 changed files with 30 additions and 16 deletions.
36 changes: 24 additions & 12 deletions rust/crd/src/lib.rs
@@ -326,18 +326,18 @@ impl SparkApplication {
                     .with_config_map(log_config_map)
                     .build(),
             );
-
-            result.push(
-                VolumeBuilder::new(VOLUME_MOUNT_NAME_LOG)
-                    .with_empty_dir(
-                        None::<String>,
-                        Some(product_logging::framework::calculate_log_volume_size_limit(
-                            &[MAX_SPARK_LOG_FILES_SIZE, MAX_INIT_LOG_FILES_SIZE],
-                        )),
-                    )
-                    .build(),
-            );
         }
+        // This volume is also used by the containerdebug process so it must always be there.
+        result.push(
+            VolumeBuilder::new(VOLUME_MOUNT_NAME_LOG)
+                .with_empty_dir(
+                    None::<String>,
+                    Some(product_logging::framework::calculate_log_volume_size_limit(
+                        &[MAX_SPARK_LOG_FILES_SIZE, MAX_INIT_LOG_FILES_SIZE],
+                    )),
+                )
+                .build(),
+        );

         if !self.packages().is_empty() {
             result.push(
@@ -397,6 +397,12 @@ impl SparkApplication {
                 mount_path: VOLUME_MOUNT_PATH_EXECUTOR_POD_TEMPLATES.into(),
                 ..VolumeMount::default()
             },
+            // This is used at least by the containerdebug process
+            VolumeMount {
+                name: VOLUME_MOUNT_NAME_LOG.into(),
+                mount_path: VOLUME_MOUNT_PATH_LOG.into(),
+                ..VolumeMount::default()
+            },
         ];

         tmpl_mounts = self.add_common_volume_mounts(tmpl_mounts, s3conn, logdir, false);
@@ -675,7 +681,7 @@ impl SparkApplication {
         submit_cmd.extend(self.spec.args.clone());

         Ok(vec![
-            format!("containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug/containerdebug-state.json --loop &"),
+            format!("containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug-state.json --loop &"),
             submit_cmd.join(" "),
         ])
     }
@@ -1386,6 +1392,12 @@ mod tests {
                 name: "executor-pod-template".into(),
                 ..VolumeMount::default()
             },
+            VolumeMount {
+                mount_path: "/stackable/log".into(),
+                mount_propagation: None,
+                name: "log".into(),
+                ..VolumeMount::default()
+            },
             VolumeMount {
                 mount_path: "/kerberos".into(),
                 mount_propagation: None,
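Taken together, the lib.rs hunks move the log emptyDir volume out of the logging-specific branch (and add the matching VolumeMount) so that it exists on every pod. A condensed sketch of the resulting shape, assuming the same imports and constants that lib.rs already has in scope; the function signature and the "log-config" volume name are illustrative, only the unconditional log-volume push is taken from the diff:

// Illustrative sketch, not the actual SparkApplication method.
fn volumes_sketch(log_config_map: Option<String>) -> Vec<Volume> {
    let mut result: Vec<Volume> = Vec::new();

    if let Some(log_config_map) = log_config_map {
        // Only the log *configuration* volume remains behind the logging check.
        result.push(
            VolumeBuilder::new("log-config") // name is illustrative
                .with_config_map(log_config_map)
                .build(),
        );
    }

    // This volume is also used by the containerdebug process so it must always be there.
    result.push(
        VolumeBuilder::new(VOLUME_MOUNT_NAME_LOG)
            .with_empty_dir(
                None::<String>,
                Some(product_logging::framework::calculate_log_volume_size_limit(
                    &[MAX_SPARK_LOG_FILES_SIZE, MAX_INIT_LOG_FILES_SIZE],
                )),
            )
            .build(),
    );

    result
}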
2 changes: 1 addition & 1 deletion rust/operator-binary/src/history/history_controller.rs
@@ -757,7 +757,7 @@ fn command_args(logdir: &ResolvedLogDir) -> Vec<String> {
     }

     command.extend(vec![
-        format!("containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug/containerdebug-state.json --loop &"),
+        format!("containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug-state.json --loop &"),
         format!("/stackable/spark/sbin/start-history-server.sh --properties-file {VOLUME_MOUNT_PATH_CONFIG}/{SPARK_DEFAULTS_FILE_NAME}"),
     ]);
     vec![command.join("\n")]
4 changes: 3 additions & 1 deletion rust/operator-binary/src/spark_k8s_controller.rs
@@ -614,7 +614,9 @@ fn pod_template(

     cb.add_env_var(
         "_STACKABLE_PRE_HOOK",
-        format!("containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug/containerdebug-state.json --loop &"),
+        format!(
+            "containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug-state.json --loop &"
+        ),
     );

     if config.logging.enable_vector_agent {
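All three Rust call sites above now drop the extra `containerdebug/` directory, so the state file is written directly under the log volume mount. A minimal, self-contained sketch of what the pre-hook line expands to, assuming VOLUME_MOUNT_PATH_LOG is "/stackable/log" (the value the kuttl assertions below expect):

// Sketch only: the constant value is assumed from the test assertions below.
const VOLUME_MOUNT_PATH_LOG: &str = "/stackable/log";

fn containerdebug_start_command() -> String {
    // Backgrounded with `&` so it keeps looping while spark-submit / the history server runs.
    format!("containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug-state.json --loop &")
}

fn main() {
    assert_eq!(
        containerdebug_start_command(),
        "containerdebug --output=/stackable/log/containerdebug-state.json --loop &"
    );
}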
2 changes: 1 addition & 1 deletion tests/templates/kuttl/smoke/40-assert.yaml
@@ -15,4 +15,4 @@ apiVersion: kuttl.dev/v1beta1
 kind: TestAssert
 timeout: 60
 commands:
-  - script: kubectl exec -n $NAMESPACE --container spark-history spark-history-node-default-0 -- cat /stackable/log/containerdebug/containerdebug-state.json | jq --exit-status
+  - script: kubectl exec -n $NAMESPACE --container spark-history spark-history-node-default-0 -- cat /stackable/log/containerdebug-state.json | jq --exit-status
2 changes: 1 addition & 1 deletion tests/templates/kuttl/smoke/50-assert.yaml
@@ -12,4 +12,4 @@ kind: TestAssert
 commands:
   - script: |
       SPARK_SUBMIT_POD=$(kubectl get -n $NAMESPACE pods --field-selector=status.phase=Running --selector batch.kubernetes.io/job-name=spark-pi-s3-1 -o jsonpath='{.items[0].metadata.name}')
-      kubectl exec -n $NAMESPACE --container spark-submit $SPARK_SUBMIT_POD -- cat /stackable/log/containerdebug/containerdebug-state.json | jq --exit-status
+      kubectl exec -n $NAMESPACE --container spark-submit $SPARK_SUBMIT_POD -- cat /stackable/log/containerdebug-state.json | jq --exit-status
