From 2d8ecd07fd4c066e8d532bea612dc2d7cbda43c9 Mon Sep 17 00:00:00 2001
From: Prakhar Singhal
Date: Tue, 27 Aug 2024 18:01:10 +0000
Subject: [PATCH] added e2e test for custom and default metric collectors

Signed-off-by: Prakhar Singhal
---
 .../scripts/gh-actions/run-e2e-tune-api.py | 62 +++++++++++++++++--
 1 file changed, 57 insertions(+), 5 deletions(-)

diff --git a/test/e2e/v1beta1/scripts/gh-actions/run-e2e-tune-api.py b/test/e2e/v1beta1/scripts/gh-actions/run-e2e-tune-api.py
index b245c9ecca6..47e6b50c2cc 100644
--- a/test/e2e/v1beta1/scripts/gh-actions/run-e2e-tune-api.py
+++ b/test/e2e/v1beta1/scripts/gh-actions/run-e2e-tune-api.py
@@ -12,8 +12,7 @@
 # The default logging config.
 logging.basicConfig(level=logging.INFO)
 
-
-def run_e2e_experiment_create_by_tune(
+def run_e2e_experiment_create_by_tune_custom_metrics_collector(
     katib_client: KatibClient,
     exp_name: str,
     exp_namespace: str,
@@ -74,6 +73,51 @@ def objective(parameters):
     logging.debug(katib_client.get_experiment(exp_name, exp_namespace))
     logging.debug(katib_client.get_suggestion(exp_name, exp_namespace))
 
+def run_e2e_experiment_create_by_tune_default_metrics_collector(
+    katib_client: KatibClient,
+    exp_name: str,
+    exp_namespace: str,
+):
+    # Create Katib Experiment and wait until it is finished.
+    logging.debug("Creating Experiment: {}/{}".format(exp_namespace, exp_name))
+
+    # Use the test case from the get-started tutorial.
+    # https://www.kubeflow.org/docs/components/katib/getting-started/#getting-started-with-katib-python-sdk
+    # [1] Create an objective function.
+    def objective(parameters):
+        import time
+        time.sleep(5)
+        result = 4 * int(parameters["a"]) - float(parameters["b"]) ** 2
+        print(f"result={result}")
+
+    # [2] Create hyperparameter search space.
+    parameters = {
+        "a": search.int(min=10, max=20),
+        "b": search.double(min=0.1, max=0.2)
+    }
+
+    # [3] Create Katib Experiment with 4 Trials and 2 CPUs per Trial,
+    # and wait until the Experiment reaches the Succeeded condition.
+    katib_client.tune(
+        name=exp_name,
+        namespace=exp_namespace,
+        objective=objective,
+        parameters=parameters,
+        objective_metric_name="result",
+        max_trial_count=4,
+        resources_per_trial={"cpu": "2"},
+    )
+    experiment = katib_client.wait_for_experiment_condition(
+        exp_name, exp_namespace, timeout=EXPERIMENT_TIMEOUT
+    )
+
+    # Verify the Experiment results.
+    verify_experiment_results(katib_client, experiment, exp_name, exp_namespace)
+
+    # Print the Experiment and Suggestion.
+    logging.debug(katib_client.get_experiment(exp_name, exp_namespace))
+    logging.debug(katib_client.get_suggestion(exp_name, exp_namespace))
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
@@ -95,11 +139,19 @@
     namespace_labels['katib.kubeflow.org/metrics-collector-injection'] = 'enabled'
     client.CoreV1Api().patch_namespace(args.namespace, {'metadata': {'labels': namespace_labels}})
 
-    # Test with run_e2e_experiment_create_by_tune
-    exp_name = "tune-example"
+    # Test with run_e2e_experiment_create_by_tune_default_metrics_collector
+    # and run_e2e_experiment_create_by_tune_custom_metrics_collector
     exp_namespace = args.namespace
     try:
-        run_e2e_experiment_create_by_tune(katib_client, exp_name, exp_namespace)
+        exp_name = "tune-example-default-metrics-collector"
+        logging.info(f"Running E2E for Experiment created by tune: {exp_namespace}/{exp_name}")
+        run_e2e_experiment_create_by_tune_default_metrics_collector(katib_client, exp_name, exp_namespace)
+        logging.info("---------------------------------------------------------------")
+        logging.info(f"E2E is succeeded for Experiment created by tune: {exp_namespace}/{exp_name}")
+
+        exp_name = "tune-example-custom-metrics-collector"
+        logging.info(f"Running E2E for Experiment created by tune: {exp_namespace}/{exp_name}")
+        run_e2e_experiment_create_by_tune_custom_metrics_collector(katib_client, exp_name, exp_namespace)
         logging.info("---------------------------------------------------------------")
         logging.info(f"E2E is succeeded for Experiment created by tune: {exp_namespace}/{exp_name}")
     except Exception as e:
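
Reviewer note: the first hunk renames the pre-existing test to
run_e2e_experiment_create_by_tune_custom_metrics_collector, but its body lies
outside the diff context, so the patch itself does not show what the "custom"
collector variant does. For orientation only, here is a minimal sketch of such
a test, assuming the Katib SDK's push-based metrics collection
(metrics_collector_config={"kind": "Push"} and katib.report_metrics, both
available in kubeflow-katib >= 0.17). The function name, the version pin, and
the EXPERIMENT_TIMEOUT constant are illustrative assumptions, not taken from
this patch.

    import logging

    from kubeflow.katib import KatibClient, search

    # Assumption: the e2e script defines a shared Experiment timeout like this.
    EXPERIMENT_TIMEOUT = 60 * 40


    def run_e2e_tune_push_metrics_collector_sketch(
        katib_client: KatibClient,
        exp_name: str,
        exp_namespace: str,
    ):
        # Objective that pushes its metric to Katib instead of printing to stdout.
        def objective(parameters):
            import kubeflow.katib as katib

            result = 4 * int(parameters["a"]) - float(parameters["b"]) ** 2
            # Report the metric directly; no sidecar log parsing is involved.
            katib.report_metrics({"result": result})

        katib_client.tune(
            name=exp_name,
            namespace=exp_namespace,
            objective=objective,
            parameters={
                "a": search.int(min=10, max=20),
                "b": search.double(min=0.1, max=0.2),
            },
            objective_metric_name="result",
            max_trial_count=4,
            resources_per_trial={"cpu": "2"},
            # Non-default collector: Trials push metrics to the Katib DB.
            metrics_collector_config={"kind": "Push"},
            # The Trial container needs the SDK to call report_metrics().
            packages_to_install=["kubeflow-katib==0.17.0"],
        )
        katib_client.wait_for_experiment_condition(
            exp_name, exp_namespace, timeout=EXPERIMENT_TIMEOUT
        )
        logging.debug(katib_client.get_experiment(exp_name, exp_namespace))

The contrast with the default-collector test in the patch is the point: the
default StdOut collector scrapes "result=..." lines from the Trial's stdout
(hence print(f"result={result}") in that objective), while a push-style
collector has the objective report metrics itself, so no particular stdout
format is required.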