Update single-binary.yml end2end_execute #6061

Merged
5 commits merged on Dec 6, 2024
Changes from 4 commits
3 changes: 3 additions & 0 deletions .github/workflows/single-binary.yml
@@ -197,6 +197,9 @@ jobs:
--version ${{ env.FLYTESNACKS_VERSION }} \
flytesnacks/$line;
done < flytesnacks/flyte_tests.txt
- name: Install Pytest
run: |
pip install pytest
- name: End2End
run: |
make end2end_execute
8 changes: 6 additions & 2 deletions boilerplate/flyte/end2end/Makefile
@@ -7,8 +7,12 @@
end2end_execute: export FLYTESNACKS_PRIORITIES ?= P0
end2end_execute: export FLYTESNACKS_VERSION ?= $(shell curl --silent "https://api.github.com/repos/flyteorg/flytesnacks/releases/latest" | jq -r .tag_name)
end2end_execute:
./boilerplate/flyte/end2end/end2end.sh ./boilerplate/flyte/end2end/functional-test-config.yaml --return_non_zero_on_failure

pytest ./boilerplate/flyte/end2end/test_run.py \
--flytesnacks_release_tag=$(FLYTESNACKS_VERSION) \
--priorities=$(FLYTESNACKS_PRIORITIES) \
--config_file=./boilerplate/flyte/end2end/functional-test-config.yaml \
--return_non_zero_on_failure

.PHONY: k8s_integration_execute
k8s_integration_execute:
echo "pass"
47 changes: 47 additions & 0 deletions boilerplate/flyte/end2end/conftest.py
@@ -0,0 +1,47 @@
import pytest

def pytest_addoption(parser):
    parser.addoption("--flytesnacks_release_tag", required=True)
    parser.addoption("--priorities", required=True)
    parser.addoption("--config_file", required=True)
    parser.addoption(
        "--return_non_zero_on_failure",
        action="store_true",
        default=False,
        help="Return a non-zero exit status if any workflow fails",
    )
    parser.addoption(
        "--terminate_workflow_on_failure",
        action="store_true",
        default=False,
        help="Abort failing workflows upon exit",
    )
    parser.addoption(
        "--test_project_name",
        default="flytesnacks",
        help="Name of project to run functional tests on"
    )
    parser.addoption(
        "--test_project_domain",
        default="development",
        help="Name of domain in project to run functional tests on"
    )
    parser.addoption(
        "--cluster_pool_name",
        required=False,
        type=str,
        default=None,
    )

@pytest.fixture
def setup_flytesnacks_env(pytestconfig):
    return {
        "flytesnacks_release_tag": pytestconfig.getoption("--flytesnacks_release_tag"),
        "priorities": pytestconfig.getoption("--priorities"),
        "config_file": pytestconfig.getoption("--config_file"),
        "return_non_zero_on_failure": pytestconfig.getoption("--return_non_zero_on_failure"),
        "terminate_workflow_on_failure": pytestconfig.getoption("--terminate_workflow_on_failure"),
        "test_project_name": pytestconfig.getoption("--test_project_name"),
        "test_project_domain": pytestconfig.getoption("--test_project_domain"),
        "cluster_pool_name": pytestconfig.getoption("--cluster_pool_name"),
    }
Contributor: nit: can you add a newline?

Contributor Author: Okay! Thank you for your review.
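For reference, a minimal sketch (a hypothetical companion test, not part of this PR) showing how a test consumes the setup_flytesnacks_env fixture defined above; the values come from the options registered in pytest_addoption:

def test_fixture_wiring(setup_flytesnacks_env):
    env = setup_flytesnacks_env
    # --flytesnacks_release_tag and --priorities are declared required, so they
    # must be supplied on the pytest command line.
    assert env["flytesnacks_release_tag"]
    assert env["priorities"]
    assert env["test_project_name"]  # defaults to "flytesnacks" unless overridden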

12 changes: 0 additions & 12 deletions boilerplate/flyte/end2end/end2end.sh

This file was deleted.

@@ -5,7 +5,7 @@
import traceback
from typing import Dict, List, Optional

import click
import pytest
import requests
from flytekit.configuration import Config
from flytekit.models.core.execution import WorkflowExecutionPhase
@@ -15,7 +15,6 @@
WAIT_TIME = 10
MAX_ATTEMPTS = 200


def execute_workflow(
remote: FlyteRemote,
version,
@@ -27,7 +26,6 @@ def execute_workflow(
wf = remote.fetch_workflow(name=workflow_name, version=version)
return remote.execute(wf, inputs=inputs, wait=False, cluster_pool=cluster_pool_name)


def executions_finished(
executions_by_wfgroup: Dict[str, List[FlyteWorkflowExecution]]
) -> bool:
@@ -36,7 +34,6 @@ def executions_finished(
return False
return True


def sync_executions(
remote: FlyteRemote, executions_by_wfgroup: Dict[str, List[FlyteWorkflowExecution]]
):
@@ -50,13 +47,11 @@
print("GOT TO THE EXCEPT")
print("COUNT THIS!")


def report_executions(executions_by_wfgroup: Dict[str, List[FlyteWorkflowExecution]]):
for executions in executions_by_wfgroup.values():
for execution in executions:
print(execution)


def schedule_workflow_groups(
tag: str,
workflow_groups: List[str],
@@ -65,10 +60,6 @@ def schedule_workflow_groups(
parsed_manifest: List[dict],
cluster_pool_name: Optional[str] = None,
) -> Dict[str, bool]:
"""
Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise
return False.
"""
executions_by_wfgroup = {}
# Schedule executions for each workflow group,
for wf_group in workflow_groups:
@@ -120,30 +111,35 @@ def schedule_workflow_groups(
results[wf_group] = len(non_succeeded_executions) == 0
return results


def valid(workflow_group, parsed_manifest):
"""
Return True if a workflow group is contained in parsed_manifest,
False otherwise.
"""
return workflow_group in set(wf_group["name"] for wf_group in parsed_manifest)

def test_run(setup_flytesnacks_env):

env = setup_flytesnacks_env

flytesnacks_release_tag = env["flytesnacks_release_tag"]
priorities = env["priorities"]
config_file_path = env["config_file"]
terminate_workflow_on_failure = env["terminate_workflow_on_failure"]
test_project_name = env["test_project_name"]
test_project_domain = env["test_project_domain"]
cluster_pool_name = env["cluster_pool_name"]
return_non_zero_on_failure = env["return_non_zero_on_failure"]

def run(
flytesnacks_release_tag: str,
priorities: List[str],
config_file_path,
terminate_workflow_on_failure: bool,
test_project_name: str,
test_project_domain: str,
cluster_pool_name: Optional[str] = None,
) -> List[Dict[str, str]]:
remote = FlyteRemote(
Config.auto(config_file=config_file_path),
test_project_name,
test_project_domain,
)

# assert flytesnacks_release_tag == "v1.0.0"
# assert config_file_path == "/path/to/config.yaml"
Contributor: Can you remove those?

Contributor Author: Okay! Thank you.


# For a given release tag and priority, this function filters the workflow groups from the flytesnacks
# manifest file. For example, for the release tag "v0.2.224" and the priority "P0" it returns [ "core" ].
manifest_url = (
@@ -152,7 +148,6 @@
)
r = requests.get(manifest_url)
parsed_manifest = r.json()
workflow_groups = []
Contributor: Why are we removing this?

Contributor Author: I noticed that workflow_groups is immediately reassigned on the next line, so the empty-list initializer doesn't need to be repeated.

workflow_groups = (
["lite"]
if "lite" in priorities
@@ -210,75 +205,12 @@ def run(
"color": background_color,
}
results.append(result)
return results


@click.command()
@click.argument("flytesnacks_release_tag")
@click.argument("priorities")
@click.argument("config_file")
@click.option(
"--return_non_zero_on_failure",
default=False,
is_flag=True,
help="Return a non-zero exit status if any workflow fails",
)
@click.option(
"--terminate_workflow_on_failure",
default=False,
is_flag=True,
help="Abort failing workflows upon exit",
)
@click.option(
"--test_project_name",
default="flytesnacks",
type=str,
is_flag=False,
help="Name of project to run functional tests on",
)
@click.option(
"--test_project_domain",
default="development",
type=str,
is_flag=False,
help="Name of domain in project to run functional tests on",
)
@click.argument(
"cluster_pool_name",
required=False,
type=str,
default=None,
)
def cli(
flytesnacks_release_tag,
priorities,
config_file,
return_non_zero_on_failure,
terminate_workflow_on_failure,
test_project_name,
test_project_domain,
cluster_pool_name,
):
print(f"return_non_zero_on_failure={return_non_zero_on_failure}")
results = run(
flytesnacks_release_tag,
priorities,
config_file,
terminate_workflow_on_failure,
test_project_name,
test_project_domain,
cluster_pool_name,
)

# Write a json object in its own line describing the result of this run to stdout
print(f"Result of run:\n{json.dumps(results)}")

# Return a non-zero exit code if core fails
if return_non_zero_on_failure:
for result in results:
if result["status"] not in ("passing", "coming soon"):
sys.exit(1)

pytest.fail("Workflow execution failed")
Contributor: What happens if multiple workflows fail?

Contributor Author: I changed it to run all the tests and then print out any error messages.
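A minimal sketch of that approach, assuming the results entries carry the "status" field used elsewhere in this diff (the helper name check_all_results is made up for illustration):

import pytest

def check_all_results(results, return_non_zero_on_failure):
    # Collect every non-passing workflow group instead of stopping at the first one.
    failed = [r for r in results if r["status"] not in ("passing", "coming soon")]
    for r in failed:
        print(f"failed workflow group result: {r}")
    if return_non_zero_on_failure and failed:
        pytest.fail(f"{len(failed)} workflow group(s) failed")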


if __name__ == "__main__":
cli()
return results
Contributor: Tests shouldn't really return anything. Can you instead assert on results?

Contributor Author: Okay! Thank you for your review.
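A hedged sketch of that suggestion: instead of returning results from test_run, assert on them so pytest itself reports the failure (the results value below is a stand-in with the status/color shape used in this diff):

results = [
    {"status": "passing", "color": "green"},
    {"status": "coming soon", "color": "grey"},
]

# A non-empty `failed` list makes the assertion fail and surfaces the
# offending entries in the failure message.
failed = [r for r in results if r["status"] not in ("passing", "coming soon")]
assert not failed, f"non-passing workflow groups: {failed}"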
