added e2e test for custom and default metric collectors
Signed-off-by: Prakhar Singhal <[email protected]>
prakhar479 committed Aug 27, 2024
1 parent 83ecb32 commit 2d8ecd0
Showing 1 changed file with 57 additions and 5 deletions.
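For context on the two test paths this commit adds: with Katib's default metrics collector the Trial's metric is scraped from its log, so the objective only has to print it in name=value form (exactly what the new function below does), whereas a custom collector has the objective hand the metric to Katib directly. The snippet below is an illustrative sketch, not code from this commit; the katib.report_metrics() helper and the metrics_collector_config argument to tune() are assumptions about the kubeflow-katib SDK, not something shown in this diff.

    # Illustration only. Default (StdOut-style) collection: print the metric
    # and let Katib parse it from the Trial log.
    def objective_stdout(parameters):
        result = 4 * int(parameters["a"]) - float(parameters["b"]) ** 2
        print(f"result={result}")  # surfaced as metric "result"

    # Assumed custom (push-style) collection: report the metric explicitly.
    def objective_push(parameters):
        import kubeflow.katib as katib  # assumption: report_metrics exists in this SDK version
        result = 4 * int(parameters["a"]) - float(parameters["b"]) ** 2
        katib.report_metrics({"result": result})

    # The collector would then be chosen on the tune() call, e.g. (assumed):
    # katib_client.tune(..., metrics_collector_config={"kind": "Push"})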
62 changes: 57 additions & 5 deletions test/e2e/v1beta1/scripts/gh-actions/run-e2e-tune-api.py
@@ -12,8 +12,7 @@
# The default logging config.
logging.basicConfig(level=logging.INFO)


-def run_e2e_experiment_create_by_tune(
+def run_e2e_experiment_create_by_tune_custom_metrics_collector(
    katib_client: KatibClient,
    exp_name: str,
    exp_namespace: str,
@@ -74,6 +73,51 @@ def objective(parameters):
    logging.debug(katib_client.get_experiment(exp_name, exp_namespace))
    logging.debug(katib_client.get_suggestion(exp_name, exp_namespace))

+def run_e2e_experiment_create_by_tune_default_metrics_collector(
+    katib_client: KatibClient,
+    exp_name: str,
+    exp_namespace: str,
+):
+    # Create Katib Experiment and wait until it is finished.
+    logging.debug("Creating Experiment: {}/{}".format(exp_namespace, exp_name))
+
+    # Use the test case from get-started tutorial.
+    # https://www.kubeflow.org/docs/components/katib/getting-started/#getting-started-with-katib-python-sdk
+    # [1] Create an objective function.
+    def objective(parameters):
+        import time
+        time.sleep(5)
+        result = 4 * int(parameters["a"]) - float(parameters["b"]) ** 2
+        print(f"result={result}")
+
+    # [2] Create hyperparameter search space.
+    parameters = {
+        "a": search.int(min=10, max=20),
+        "b": search.double(min=0.1, max=0.2)
+    }
+
+    # [4] Create Katib Experiment with 4 Trials and 2 CPUs per Trial.
+    # And wait until Experiment reaches Succeeded condition.
+    katib_client.tune(
+        name=exp_name,
+        namespace=exp_namespace,
+        objective=objective,
+        parameters=parameters,
+        objective_metric_name="result",
+        max_trial_count=4,
+        resources_per_trial={"cpu": "2"},
+    )
+    experiment = katib_client.wait_for_experiment_condition(
+        exp_name, exp_namespace, timeout=EXPERIMENT_TIMEOUT
+    )
+
+    # Verify the Experiment results.
+    verify_experiment_results(katib_client, experiment, exp_name, exp_namespace)
+
+    # Print the Experiment and Suggestion.
+    logging.debug(katib_client.get_experiment(exp_name, exp_namespace))
+    logging.debug(katib_client.get_suggestion(exp_name, exp_namespace))
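
For local debugging, one could additionally pull the best Trial's parameters after wait_for_experiment_condition() returns. This is a sketch that assumes KatibClient.get_optimal_hyperparameters() is available in this SDK version; the script itself relies on verify_experiment_results():

    # Hedged sketch only; get_optimal_hyperparameters() is assumed, not used by this script.
    best = katib_client.get_optimal_hyperparameters(exp_name, exp_namespace)
    logging.debug("Optimal hyperparameters: %s", best)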


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
@@ -95,11 +139,19 @@ def objective(parameters):
        namespace_labels['katib.kubeflow.org/metrics-collector-injection'] = 'enabled'
        client.CoreV1Api().patch_namespace(args.namespace, {'metadata': {'labels': namespace_labels}})

-    # Test with run_e2e_experiment_create_by_tune
-    exp_name = "tune-example"
+    # Test with run_e2e_experiment_create_by_tune_default_metrics_collector
+    # and run_e2e_experiment_create_by_tune_custom_metrics_collector
    exp_namespace = args.namespace
    try:
-        run_e2e_experiment_create_by_tune(katib_client, exp_name, exp_namespace)
+        exp_name = "tune-example-default-metrics-collector"
+        logging.info(f"Running E2E for Experiment created by tune: {exp_namespace}/{exp_name}")
+        run_e2e_experiment_create_by_tune_default_metrics_collector(katib_client, exp_name, exp_namespace)
+        logging.info("---------------------------------------------------------------")
+        logging.info(f"E2E succeeded for Experiment created by tune: {exp_namespace}/{exp_name}")
+
+        exp_name = "tune-example-custom-metrics-collector"
+        logging.info(f"Running E2E for Experiment created by tune: {exp_namespace}/{exp_name}")
+        run_e2e_experiment_create_by_tune_custom_metrics_collector(katib_client, exp_name, exp_namespace)
+        logging.info("---------------------------------------------------------------")
+        logging.info(f"E2E succeeded for Experiment created by tune: {exp_namespace}/{exp_name}")
    except Exception as e:

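The argparse setup is collapsed in the hunk above; assuming the target namespace is passed via a --namespace flag (it is read back as args.namespace), a local run would look roughly like:

    python test/e2e/v1beta1/scripts/gh-actions/run-e2e-tune-api.py --namespace <test-namespace>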