diff --git a/providers/openfeature-provider-flagd/spec b/providers/openfeature-provider-flagd/spec
new file mode 160000
index 00000000..3c737a6e
--- /dev/null
+++ b/providers/openfeature-provider-flagd/spec
@@ -0,0 +1 @@
+Subproject commit 3c737a6e86ae0aa9bd81fcbfe8b6ada9a33993a7
diff --git a/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/in_process.py b/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/in_process.py
index 37c1608a..7f280b18 100644
--- a/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/in_process.py
+++ b/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/in_process.py
@@ -84,8 +84,6 @@ def resolve_float_details(
         evaluation_context: typing.Optional[EvaluationContext] = None,
     ) -> FlagResolutionDetails[float]:
         result = self._resolve(key, default_value, evaluation_context)
-        if not isinstance(result.value, float):
-            result.value = float(result.value)
         return result

     def resolve_integer_details(
@@ -95,8 +93,6 @@ def resolve_integer_details(
         evaluation_context: typing.Optional[EvaluationContext] = None,
     ) -> FlagResolutionDetails[int]:
         result = self._resolve(key, default_value, evaluation_context)
-        if not isinstance(result.value, int):
-            result.value = int(result.value)
         return result

     def resolve_object_details(
diff --git a/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/connector/grpc_watcher.py b/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/connector/grpc_watcher.py
index f6ea3ecf..cee2a117 100644
--- a/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/connector/grpc_watcher.py
+++ b/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/connector/grpc_watcher.py
@@ -30,7 +30,14 @@ def __init__(
     ):
         self.flag_store = flag_store
         channel_factory = grpc.secure_channel if config.tls else grpc.insecure_channel
-        self.channel = channel_factory(f"{config.host}:{config.port}")
+        self.channel = channel_factory(
+            f"{config.host}:{config.port}",
+            options=(
+                ("grpc.max_reconnect_backoff_ms", 1000),
+                ("grpc.initial_reconnect_backoff_ms", 1000),
+                ("grpc.keepalive_time_ms", 1000),
+            ),
+        )
         self.stub = sync_pb2_grpc.FlagSyncServiceStub(self.channel)
         self.timeout = config.timeout
         self.retry_backoff_seconds = config.retry_backoff_seconds
@@ -62,11 +69,10 @@ def shutdown(self) -> None:
         self.active = False

     def sync_flags(self) -> None:
-        request = sync_pb2.SyncFlagsRequest(selector=self.selector)  # type:ignore[attr-defined]
-
         retry_delay = self.retry_backoff_seconds
         while self.active:
             try:
+                request = sync_pb2.SyncFlagsRequest(selector=self.selector)  # type:ignore[attr-defined]
                 logger.debug("Setting up gRPC sync flags connection")
                 for flag_rsp in self.stub.SyncFlags(request):
                     flag_str = flag_rsp.flag_configuration
@@ -107,4 +113,4 @@ def sync_flags(self) -> None:
                 )
                 logger.info(f"gRPC sync disconnected, reconnecting in {retry_delay}s")
                 time.sleep(retry_delay)
-                retry_delay = min(2 * retry_delay, self.MAX_BACK_OFF)
+                retry_delay = min(2, self.MAX_BACK_OFF)
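
The channel options added above tune gRPC's own client-side reconnect machinery rather than the provider's retry loop. A minimal standalone sketch of the same configuration (assuming only the grpc package; the target address is hypothetical):

    import grpc

    # Cap gRPC's exponential reconnect backoff at 1s and send keepalive pings
    # every 1s, so a dropped SyncFlags stream is detected and redialed quickly.
    channel = grpc.insecure_channel(
        "localhost:8013",
        options=(
            ("grpc.max_reconnect_backoff_ms", 1000),
            ("grpc.initial_reconnect_backoff_ms", 1000),
            ("grpc.keepalive_time_ms", 1000),
        ),
    )

Moving the SyncFlagsRequest construction inside the retry loop also means every reconnect attempt builds a fresh request instead of reusing one tied to the failed stream.
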
diff --git a/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/flags.py b/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/flags.py
index 889edac7..6b1ac0d1 100644
--- a/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/flags.py
+++ b/providers/openfeature-provider-flagd/src/openfeature/contrib/provider/flagd/resolvers/process/flags.py
@@ -72,6 +72,10 @@ def from_dict(cls, key: str, data: dict) -> "Flag":
             data["default_variant"] = data["defaultVariant"]
             del data["defaultVariant"]

+        if "source" in data:
+            del data["source"]
+        if "selector" in data:
+            del data["selector"]
         try:
             flag = cls(key=key, **data)
             return flag
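
from_dict now drops the source and selector metadata that flagd's sync payload can carry, since the Flag constructor does not accept them as keyword arguments. A sketch of the equivalent dict.pop idiom (behaviorally the same as the two guards above):

    # Discard sync-protocol metadata before calling Flag(key=key, **data).
    for meta_key in ("source", "selector"):
        data.pop(meta_key, None)
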
diff --git a/providers/openfeature-provider-flagd/test-harness b/providers/openfeature-provider-flagd/test-harness
index c9e0be36..6197b3d9 160000
--- a/providers/openfeature-provider-flagd/test-harness
+++ b/providers/openfeature-provider-flagd/test-harness
@@ -1 +1 @@
-Subproject commit c9e0be36e89ad33aa99b8e32b40d67e9bf350f88
+Subproject commit 6197b3d956d358bf662e5b8e0aebdc4800480f6b
diff --git a/providers/openfeature-provider-flagd/tests/e2e/conftest.py b/providers/openfeature-provider-flagd/tests/e2e/conftest.py
index 5de9923f..34fcf2c1 100644
--- a/providers/openfeature-provider-flagd/tests/e2e/conftest.py
+++ b/providers/openfeature-provider-flagd/tests/e2e/conftest.py
@@ -1,312 +1,27 @@
-import logging
-import time
 import typing

 import pytest
-from pytest_bdd import parsers, then, when
+from testcontainers.core.container import DockerContainer

-from openfeature.client import OpenFeatureClient, ProviderEvent
-from openfeature.evaluation_context import EvaluationContext
+from tests.e2e.flagd_container import FlagDContainer
+from tests.e2e.steps import *  # noqa: F403

 JsonPrimitive = typing.Union[str, bool, float, int]


-def to_bool(s: str) -> bool:
-    return s.lower() == "true"
-
-
-@pytest.fixture
-def evaluation_context() -> EvaluationContext:
-    return EvaluationContext()
-
-
-@when(
-    parsers.cfparse(
-        'a zero-value boolean flag with key "{key}" is evaluated with default value "{default:bool}"',
-        extra_types={"bool": to_bool},
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a zero-value string flag with key "{key}" is evaluated with default value "{default}"',
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a string flag with key "{key}" is evaluated with default value "{default}"'
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a zero-value integer flag with key "{key}" is evaluated with default value {default:d}',
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'an integer flag with key "{key}" is evaluated with default value {default:d}',
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a zero-value float flag with key "{key}" is evaluated with default value {default:f}',
-    ),
-    target_fixture="key_and_default",
-)
-def setup_key_and_default(
-    key: str, default: JsonPrimitive
-) -> typing.Tuple[str, JsonPrimitive]:
-    return (key, default)
-
-
-@when(
-    parsers.cfparse(
-        'a context containing a targeting key with value "{targeting_key}"'
-    ),
-)
-def assign_targeting_context(evaluation_context: EvaluationContext, targeting_key: str):
-    """a context containing a targeting key with value <targeting_key>."""
-    evaluation_context.targeting_key = targeting_key
-
-
-@when(
-    parsers.cfparse('a context containing a key "{key}", with value "{value}"'),
-)
-@when(
-    parsers.cfparse('a context containing a key "{key}", with value {value:d}'),
-)
-def update_context(
-    evaluation_context: EvaluationContext, key: str, value: JsonPrimitive
-):
-    """a context containing a key and value."""
-    evaluation_context.attributes[key] = value
-
-
-@when(
-    parsers.cfparse(
-        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value "{value}"'
-    ),
-)
-@when(
-    parsers.cfparse(
-        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value {value:d}'
-    ),
-)
-def update_context_nested(
-    evaluation_context: EvaluationContext,
-    outer: str,
-    inner: str,
-    value: typing.Union[str, int],
-):
-    """a context containing a nested property with outer key, and inner key, and value."""
-    if outer not in evaluation_context.attributes:
-        evaluation_context.attributes[outer] = {}
-    evaluation_context.attributes[outer][inner] = value
-
-
-@then(
-    parsers.cfparse(
-        'the resolved boolean zero-value should be "{expected_value:bool}"',
-        extra_types={"bool": to_bool},
-    )
-)
-def assert_boolean_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: bool,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_boolean_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(
-    parsers.cfparse(
-        "the resolved integer zero-value should be {expected_value:d}",
-    )
-)
-@then(parsers.cfparse("the returned value should be {expected_value:d}"))
-def assert_integer_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: int,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_integer_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(
-    parsers.cfparse(
-        "the resolved float zero-value should be {expected_value:f}",
-    )
-)
-def assert_float_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: float,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_float_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(parsers.cfparse('the returned value should be "{expected_value}"'))
-def assert_string_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: str,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_string_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(
-    parsers.cfparse(
-        'the resolved string zero-value should be ""',
-    )
-)
-def assert_empty_string(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_string_value(key, default, evaluation_context)
-    assert evaluation_result == ""
-
-
-@then(parsers.cfparse('the returned reason should be "{reason}"'))
-def assert_reason(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    evaluation_context: EvaluationContext,
-    reason: str,
-):
-    """the returned reason should be <reason>."""
-    key, default = key_and_default
-    evaluation_result = client.get_string_details(key, default, evaluation_context)
-    assert evaluation_result.reason.value == reason
-
-
-@pytest.fixture
-def handles() -> list:
-    return []
-
-
-@when(
-    parsers.cfparse(
-        "a {event_type:ProviderEvent} handler is added",
-        extra_types={"ProviderEvent": ProviderEvent},
-    ),
-    target_fixture="handles",
-)
-def add_event_handler(
-    client: OpenFeatureClient, event_type: ProviderEvent, handles: list
-):
-    def handler(event):
-        logging.info((event_type, event))
-        handles.append(
-            {
-                "type": event_type,
-                "event": event,
-            }
-        )
-
-    client.add_handler(event_type, handler)
-    return handles
-
-
-@when(
-    parsers.cfparse(
-        "a {event_type:ProviderEvent} handler and a {event_type2:ProviderEvent} handler are added",
-        extra_types={"ProviderEvent": ProviderEvent},
-    ),
-    target_fixture="handles",
-)
-def add_event_handlers(
-    client: OpenFeatureClient,
-    event_type: ProviderEvent,
-    event_type2: ProviderEvent,
-    handles: list,
-):
-    add_event_handler(client, event_type, handles)
-    add_event_handler(client, event_type2, handles)
-
-
-def assert_handlers(
-    handles, event_type: ProviderEvent, max_wait: int = 2, num_events: int = 1
-):
-    poll_interval = 0.05
-    while max_wait > 0:
-        if sum([h["type"] == event_type for h in handles]) < num_events:
-            max_wait -= poll_interval
-            time.sleep(poll_interval)
-            continue
-        break
-
-    logging.info(f"asserting num({event_type}) >= {num_events}: {handles}")
-    actual_num_events = sum([h["type"] == event_type for h in handles])
-    assert (
-        num_events <= actual_num_events
-    ), f"Expected {num_events} but got {actual_num_events}: {handles}"
-
-
-@then(
-    parsers.cfparse(
-        "the {event_type:ProviderEvent} handler must run",
-        extra_types={"ProviderEvent": ProviderEvent},
-    )
-)
-@then(
-    parsers.cfparse(
-        "the {event_type:ProviderEvent} handler must run when the provider connects",
-        extra_types={"ProviderEvent": ProviderEvent},
-    )
-)
-def assert_handler_run(handles, event_type: ProviderEvent):
-    assert_handlers(handles, event_type, max_wait=3)
-
-
-@then(
-    parsers.cfparse(
-        "the {event_type:ProviderEvent} handler must run when the provider's connection is lost",
-        extra_types={"ProviderEvent": ProviderEvent},
-    )
-)
-def assert_disconnect_handler(handles, event_type: ProviderEvent):
-    # docker sync upstream restarts every 5s, waiting 2 cycles reduces test noise
-    assert_handlers(handles, event_type, max_wait=10)
-
-
-@then(
-    parsers.cfparse(
-        "when the connection is reestablished the {event_type:ProviderEvent} handler must run again",
-        extra_types={"ProviderEvent": ProviderEvent},
+@pytest.fixture(autouse=True, scope="module")
+def setup(request, port, image):
+    container: DockerContainer = FlagDContainer(
+        image=image,
+        port=port,
     )
-)
-def assert_disconnect_error(client: OpenFeatureClient, event_type: ProviderEvent):
-    reconnect_handles = []
-    add_event_handler(client, event_type, reconnect_handles)
-    assert_handlers(reconnect_handles, event_type, max_wait=6)
+    # Setup code
+    c = container.start()

+    def fin():
+        c.stop()

-@then(parsers.cfparse('the event details must indicate "{key}" was altered'))
-def assert_flag_changed(handles, key):
-    handle = None
-    for h in handles:
-        if h["type"] == ProviderEvent.PROVIDER_CONFIGURATION_CHANGED:
-            handle = h
-            break
+    # Teardown code
+    request.addfinalizer(fin)

-    assert handle is not None
-    assert key in handle["event"].flags_changed
+    return c.get_exposed_port(port)
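
The rewritten conftest replaces the per-directory provider fixtures with a single module-scoped testcontainer; the fixture value is the host port mapped to the container's flagd port. The addfinalizer-based teardown above is equivalent to this yield-style sketch (same semantics, just a different pytest idiom):

    import pytest
    from tests.e2e.flagd_container import FlagDContainer

    @pytest.fixture(autouse=True, scope="module")
    def setup(port, image):
        container = FlagDContainer(image=image, port=port)
        c = container.start()
        # Hand the mapped host port to dependent fixtures; stop on teardown.
        yield c.get_exposed_port(port)
        c.stop()
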
diff --git a/providers/openfeature-provider-flagd/tests/e2e/flagd_container.py b/providers/openfeature-provider-flagd/tests/e2e/flagd_container.py
new file mode 100644
index 00000000..eb0d0c1c
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/flagd_container.py
@@ -0,0 +1,59 @@
+import time
+
+import grpc
+from grpc_health.v1 import health_pb2, health_pb2_grpc
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_container_is_ready, wait_for_logs
+
+HEALTH_CHECK = 8014
+
+
+class FlagDContainer(DockerContainer):
+    def __init__(
+        self,
+        image: str = "ghcr.io/open-feature/flagd-testbed:v0.5.13",
+        port: int = 8013,
+        **kwargs,
+    ) -> None:
+        super().__init__(image, **kwargs)
+        self.port = port
+        self.with_exposed_ports(self.port, HEALTH_CHECK)
+
+    def start(self) -> "FlagDContainer":
+        super().start()
+        self._checker(self.get_container_host_ip(), self.get_exposed_port(HEALTH_CHECK))
+        return self
+
+    @wait_container_is_ready(ConnectionError)
+    def _checker(self, host: str, port: str) -> None:
+        # First, wait for flagd to report that it is listening
+        wait_for_logs(
+            self,
+            "listening",
+            5,
+        )
+
+        time.sleep(1)
+        # Second, probe the gRPC health check endpoint
+        with grpc.insecure_channel(host + ":" + port) as channel:
+            health_stub = health_pb2_grpc.HealthStub(channel)
+
+            def health_check_call(stub: health_pb2_grpc.HealthStub):
+                request = health_pb2.HealthCheckRequest()
+                resp = stub.Check(request)
+                if resp.status == health_pb2.HealthCheckResponse.SERVING:
+                    return True
+                elif resp.status == health_pb2.HealthCheckResponse.NOT_SERVING:
+                    return False
+                return False
+
+            # Check health status every 1 second for 30 seconds
+            ok = False
+            for _ in range(30):
+                ok = health_check_call(health_stub)
+                if ok:
+                    break
+                time.sleep(1)
+
+            if not ok:
+                raise ConnectionError("flagd not ready in time")
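
The two-stage readiness check above (log line first, then gRPC health probe) presumably guards against the window where the process logs "listening" before the health service answers. The health probe on its own, reduced to the essentials (assuming a flagd health endpoint already mapped to a local port):

    import grpc
    from grpc_health.v1 import health_pb2, health_pb2_grpc

    with grpc.insecure_channel("localhost:8014") as channel:
        stub = health_pb2_grpc.HealthStub(channel)
        # An empty service name asks for the overall server status.
        resp = stub.Check(health_pb2.HealthCheckRequest())
        ready = resp.status == health_pb2.HealthCheckResponse.SERVING
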
"""Substring operators.""" diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_edge_cases.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_edge_cases.py deleted file mode 100644 index 976a5107..00000000 --- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_edge_cases.py +++ /dev/null @@ -1,15 +0,0 @@ -import pytest -from pytest_bdd import scenario -from tests.conftest import setup_flag_file - -GHERKIN_FOLDER = "../../../../test-harness/gherkin/" - - -@pytest.fixture -def flag_file(tmp_path): - return setup_flag_file(tmp_path, "edge-case-flags.json") - - -@scenario(f"{GHERKIN_FOLDER}flagd-json-evaluator.feature", "Errors and edge cases") -def test_errors_and_edge_cases(): - """Errors and edge cases.""" diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_evaluator_reuse.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_evaluator_reuse.py deleted file mode 100644 index 5bc802f7..00000000 --- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_evaluator_reuse.py +++ /dev/null @@ -1,15 +0,0 @@ -import pytest -from pytest_bdd import scenario -from tests.conftest import setup_flag_file - -GHERKIN_FOLDER = "../../../../test-harness/gherkin/" - - -@pytest.fixture -def flag_file(tmp_path): - return setup_flag_file(tmp_path, "evaluator-refs.json") - - -@scenario(f"{GHERKIN_FOLDER}flagd-json-evaluator.feature", "Evaluator reuse") -def test_evaluator_reuse(): - """Evaluator reuse.""" diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_events.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_events.py deleted file mode 100644 index 58603dcc..00000000 --- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_events.py +++ /dev/null @@ -1,35 +0,0 @@ -import logging -import time -from pathlib import Path - -import pytest -from pytest_bdd import parsers, scenario, when -from tests.conftest import setup_flag_file - -GHERKIN_FOLDER = "../../../../test-harness/gherkin/" - - -@scenario(f"{GHERKIN_FOLDER}flagd.feature", "Provider ready event") -def test_ready_event(caplog): - """Provider ready event""" - caplog.set_level(logging.DEBUG) - - -@scenario(f"{GHERKIN_FOLDER}flagd.feature", "Flag change event") -def test_change_event(): - """Flag change event""" - - -@pytest.fixture -def flag_file(tmp_path): - return setup_flag_file(tmp_path, "changing-flag-bar.json") - - -@when(parsers.cfparse('a flag with key "{key}" is modified')) -def modify_flag(flag_file, key): - time.sleep(0.1) # guard against race condition - contents = ( - Path(__file__).parent / "../../../../test-harness/flags/changing-flag-foo.json" - ).read_text() - with open(flag_file, "w") as f: - f.write(contents) diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_testing_flags.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_testing_flags.py deleted file mode 100644 index 84e815a6..00000000 --- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_testing_flags.py +++ /dev/null @@ -1,26 +0,0 @@ -import pytest -from pytest_bdd import scenario -from tests.conftest import setup_flag_file - -GHERKIN_FOLDER = "../../../../test-harness/gherkin/" - - -@pytest.fixture -def flag_file(tmp_path): - return setup_flag_file(tmp_path, "testing-flags.json") - - -@scenario( - 
f"{GHERKIN_FOLDER}flagd-json-evaluator.feature", - "Time-based operations", -) -def test_timebased_operations(): - """Time-based operations.""" - - -@scenario( - f"{GHERKIN_FOLDER}flagd-json-evaluator.feature", - "Targeting by targeting key", -) -def test_targeting_by_targeting_key(): - """Targeting by targeting key.""" diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_zero_evals.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_zero_evals.py deleted file mode 100644 index cebe6e10..00000000 --- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/file/test_inprocess_zero_evals.py +++ /dev/null @@ -1,30 +0,0 @@ -import pytest -from pytest_bdd import scenario -from tests.conftest import setup_flag_file - -GHERKIN_FOLDER = "../../../../test-harness/gherkin/" - - -@scenario(f"{GHERKIN_FOLDER}flagd.feature", "Resolves boolean zero value") -def test_eval_boolean(): - """Resolve boolean zero value""" - - -@scenario(f"{GHERKIN_FOLDER}flagd.feature", "Resolves string zero value") -def test_eval_string(): - """Resolve string zero value""" - - -@scenario(f"{GHERKIN_FOLDER}flagd.feature", "Resolves integer zero value") -def test_eval_integer(): - """Resolve integer zero value""" - - -@scenario(f"{GHERKIN_FOLDER}flagd.feature", "Resolves float zero value") -def test_eval_float(): - """Resolve float zero value""" - - -@pytest.fixture -def flag_file(tmp_path): - return setup_flag_file(tmp_path, "zero-flags.json") diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/conftest.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/conftest.py deleted file mode 100644 index 3c15b0c8..00000000 --- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/conftest.py +++ /dev/null @@ -1,49 +0,0 @@ -import pytest -from pytest_bdd import given, parsers, then, when -from tests.e2e.conftest import add_event_handler, assert_handlers - -from openfeature import api -from openfeature.client import OpenFeatureClient, ProviderEvent -from openfeature.contrib.provider.flagd import FlagdProvider -from openfeature.contrib.provider.flagd.config import ResolverType - - -@pytest.fixture -def port(): - # Port for flagd-sync, override to 9091 to test unstable version - return 9090 - - -@given("a flagd provider is set", target_fixture="client") -def setup_provider(port: int) -> OpenFeatureClient: - api.set_provider( - FlagdProvider( - resolver_type=ResolverType.IN_PROCESS, - port=port, - timeout=0.5, - retry_backoff_seconds=0.1, - ) - ) - return api.get_client() - - -@when(parsers.cfparse('a flag with key "{key}" is modified')) -def modify_flag(key): - # sync service will flip flag contents regularly - pass - - -@given("flagd is unavailable", target_fixture="client") -def flagd_unavailable(): - return setup_provider(99999) - - -@when("a flagd provider is set and initialization is awaited") -def flagd_init(client: OpenFeatureClient, handles): - add_event_handler(client, ProviderEvent.PROVIDER_ERROR, handles) - add_event_handler(client, ProviderEvent.PROVIDER_READY, handles) - - -@then("an error should be indicated within the configured deadline") -def flagd_error(handles): - assert_handlers(handles, ProviderEvent.PROVIDER_ERROR) diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc.py deleted file mode 100644 index 926c2195..00000000 --- 
diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc.py
deleted file mode 100644
index 926c2195..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from pytest_bdd import scenarios
-
-GHERKIN_FOLDER = "../../../../test-harness/gherkin/"
-
-scenarios(f"{GHERKIN_FOLDER}flagd-json-evaluator.feature")
-scenarios(f"{GHERKIN_FOLDER}flagd.feature")
diff --git a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc_reconnect.py b/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc_reconnect.py
deleted file mode 100644
index e3e1b85d..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/inprocess/grpc/test_inprocess_grpc_reconnect.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import pytest
-from pytest_bdd import scenarios
-
-GHERKIN_FOLDER = "../../../../test-harness/gherkin/"
-
-scenarios(f"{GHERKIN_FOLDER}flagd-reconnect.feature")
-
-
-@pytest.fixture
-def port():
-    # Port for flagd-sync-unstable, overrides main conftest port
-    return 9091
diff --git a/providers/openfeature-provider-flagd/tests/e2e/parsers.py b/providers/openfeature-provider-flagd/tests/e2e/parsers.py
new file mode 100644
index 00000000..9d9560c6
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/parsers.py
@@ -0,0 +1,7 @@
+def to_bool(s: str) -> bool:
+    return s.lower() == "true"
+
+
+def to_list(s: str) -> list:
+    values = s.replace('"', "").split(",")
+    return [value.strip() for value in values]
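
These converters are meant to be plugged into pytest-bdd's cfparse matchers as extra_types, together with the `?` cardinality suffix that makes a field optional. A sketch of how one step definition then covers several phrasings (the pattern is taken from steps.py below):

    from pytest_bdd import parsers
    from tests.e2e.parsers import to_bool

    # "{ignored:s?}" optionally consumes a qualifier such as "zero-value ",
    # so a single matcher serves both "a boolean flag ..." and
    # "a zero-value boolean flag ...".
    matcher = parsers.cfparse(
        'a {ignored:s?}boolean flag with key "{key}" is evaluated with '
        '{details:s?}default value "{default:bool}"',
        extra_types={"bool": to_bool, "s": str},
    )
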
diff --git a/providers/openfeature-provider-flagd/tests/e2e/steps.py b/providers/openfeature-provider-flagd/tests/e2e/steps.py
new file mode 100644
index 00000000..9e9ad792
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/steps.py
@@ -0,0 +1,661 @@
+import logging
+import time
+import typing
+
+import pytest
+from asserts import assert_equal, assert_in, assert_not_equal, assert_true
+from pytest_bdd import given, parsers, then, when
+
+from openfeature import api
+from openfeature.client import OpenFeatureClient
+from openfeature.contrib.provider.flagd import FlagdProvider
+from openfeature.contrib.provider.flagd.config import ResolverType
+from openfeature.evaluation_context import EvaluationContext
+from openfeature.event import ProviderEvent
+from openfeature.flag_evaluation import ErrorCode, FlagEvaluationDetails, Reason
+from openfeature.provider import ProviderStatus
+from tests.e2e.parsers import to_bool, to_list
+
+JsonObject = typing.Union[dict, list]
+JsonPrimitive = typing.Union[str, bool, float, int, JsonObject]
+
+
+@pytest.fixture
+def evaluation_context() -> EvaluationContext:
+    return EvaluationContext()
+
+
+@given("a flagd provider is set", target_fixture="client")
+@given("a provider is registered", target_fixture="client")
+def setup_provider(setup, resolver_type, client_name) -> OpenFeatureClient:
+    api.set_provider(
+        FlagdProvider(
+            resolver_type=resolver_type,
+            port=setup,
+            timeout=0.5,
+            retry_backoff_seconds=0.1,
+        ),
+        client_name,
+    )
+    client = api.get_client(client_name)
+    wait_for(lambda: client.get_provider_status() == ProviderStatus.READY)
+    return client
+
+
+@when(
+    parsers.cfparse(
+        'a {ignored:s?}boolean flag with key "{key}" is evaluated with {details:s?}default value "{default:bool}"',
+        extra_types={"bool": to_bool, "s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a {ignored:s?}string flag with key "{key}" is evaluated with {details:s?}default value "{default}"',
+        extra_types={"s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a{ignored:s?} integer flag with key "{key}" is evaluated with {details:s?}default value {default:d}',
+        extra_types={"s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a {ignored:s?}float flag with key "{key}" is evaluated with {details:s?}default value {default:f}',
+        extra_types={"s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a string flag with key "{key}" is evaluated as an integer, with details and a default value {default:d}',
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a flag with key "{key}" is evaluated with default value "{default}"',
+    ),
+    target_fixture="key_and_default",
+)
+def setup_key_and_default(
+    key: str, default: JsonPrimitive
+) -> typing.Tuple[str, JsonPrimitive]:
+    return (key, default)
+
+
+@when(
+    parsers.cfparse(
+        'an object flag with key "{key}" is evaluated with a null default value',
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'an object flag with key "{key}" is evaluated with details and a null default value',
+    ),
+    target_fixture="key_and_default",
+)
+def setup_key_and_default_for_object(key: str) -> typing.Tuple[str, JsonObject]:
+    return (key, {})
+
+
+@when(
+    parsers.cfparse(
+        'a context containing a targeting key with value "{targeting_key}"'
+    ),
+)
+def assign_targeting_context(evaluation_context: EvaluationContext, targeting_key: str):
+    """a context containing a targeting key with value <targeting_key>."""
+    evaluation_context.targeting_key = targeting_key
+
+
+@when(
+    parsers.cfparse(
+        'context contains keys {fields:s} with values "{svalue}", "{svalue2}", {ivalue:d}, "{bvalue:bool}"',
+        extra_types={"bool": to_bool, "s": to_list},
+    ),
+)
+def assign_targeting_context_2(
+    evaluation_context: EvaluationContext,
+    fields: list,
+    svalue: str,
+    svalue2: str,
+    ivalue: int,
+    bvalue: bool,
+):
+    evaluation_context.attributes[fields[0]] = svalue
+    evaluation_context.attributes[fields[1]] = svalue2
+    evaluation_context.attributes[fields[2]] = ivalue
+    evaluation_context.attributes[fields[3]] = bvalue
+
+
+@when(
+    parsers.cfparse('a context containing a key "{key}", with value "{value}"'),
+)
+@when(
+    parsers.cfparse('a context containing a key "{key}", with value {value:d}'),
+)
+def update_context(
+    evaluation_context: EvaluationContext, key: str, value: JsonPrimitive
+):
+    """a context containing a key and value."""
+    evaluation_context.attributes[key] = value
+
+
+@when(
+    parsers.cfparse(
+        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value "{value}"'
+    ),
+)
+@when(
+    parsers.cfparse(
+        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value {value:d}'
+    ),
+)
+def update_context_nested(
+    evaluation_context: EvaluationContext,
+    outer: str,
+    inner: str,
+    value: typing.Union[str, int],
+):
+    """a context containing a nested property with outer key, and inner key, and value."""
+    if outer not in evaluation_context.attributes:
+        evaluation_context.attributes[outer] = {}
+    evaluation_context.attributes[outer][inner] = value
+
+
+@then(
+    parsers.cfparse(
+        'the resolved boolean value should be "{expected_value:bool}"',
+        extra_types={"bool": to_bool},
+    )
+)
+@then(
+    parsers.cfparse(
+        'the resolved boolean zero-value should be "{expected_value:bool}"',
+        extra_types={"bool": to_bool},
+    )
+)
+def assert_boolean_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: bool,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_boolean_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved boolean details value should be "{expected_value:bool}", the variant should be "{variant}", and the reason should be "{reason}"',
+        extra_types={"bool": to_bool},
+    )
+)
+def assert_boolean_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: bool,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_boolean_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(
+    parsers.cfparse(
+        "the resolved integer {ignored:s?}value should be {expected_value:d}",
+        extra_types={"s": str},
+    )
+)
+@then(parsers.cfparse("the returned value should be {expected_value:d}"))
+def assert_integer_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: int,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_integer_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved integer details value should be {expected_value:d}, the variant should be "{variant}", and the reason should be "{reason}"',
+    )
+)
+def assert_integer_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: int,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_integer_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(
+    parsers.cfparse(
+        "the resolved float {ignored:s?}value should be {expected_value:f}",
+        extra_types={"s": str},
+    )
+)
+def assert_float_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: float,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_float_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved float details value should be {expected_value:f}, the variant should be "{variant}", and the reason should be "{reason}"',
+    )
+)
+def assert_float_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: float,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_float_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(parsers.cfparse('the returned value should be "{expected_value}"'))
+def assert_string_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_details = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_details.value, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string zero-value should be ""',
+    )
+)
+def assert_empty_string(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+):
+    assert_string(client, key_and_default, evaluation_context, "")
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string value should be "{expected_value}"',
+    )
+)
+def assert_string(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    expected_value: str,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string response should be "{expected_value}"',
+    )
+)
+def assert_string_response(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    expected_value: str,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved flag value is "{expected_value}" when the context is empty',
+    )
+)
+def assert_string_without_context(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    expected_value: str,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_value(key, default, None)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved object {details:s?}value should be contain fields "{bool_field}", "{string_field}", and "{int_field}", with values "{bvalue:bool}", "{svalue}" and {ivalue:d}, respectively',
+        extra_types={"bool": to_bool, "s": str},
+    ),
+    target_fixture="evaluation_details",
+)
+def assert_object(  # noqa: PLR0913
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    bool_field: str,
+    string_field: str,
+    int_field: str,
+    bvalue: bool,
+    svalue: str,
+    ivalue: int,
+    details: str,
+) -> FlagEvaluationDetails:
+    key, default = key_and_default
+    if details:
+        evaluation_result = client.get_object_details(key, default)
+        value = evaluation_result.value
+        assert_in(bool_field, value)
+        assert_in(string_field, value)
+        assert_in(int_field, value)
+        assert_equal(value[bool_field], bvalue)
+        assert_equal(value[string_field], svalue)
+        assert_equal(value[int_field], ivalue)
+        return evaluation_result
+    else:
+        evaluation_result = client.get_object_value(key, default)
+        assert_in(bool_field, evaluation_result)
+        assert_in(string_field, evaluation_result)
+        assert_in(int_field, evaluation_result)
+        assert_equal(evaluation_result[bool_field], bvalue)
+        assert_equal(evaluation_result[string_field], svalue)
+        assert_equal(evaluation_result[int_field], ivalue)
+        assert_not_equal(evaluation_result, None)
+
+
+@then(
+    parsers.cfparse(
+        'the variant should be "{variant}", and the reason should be "{reason}"',
+    )
+)
+def assert_for_variant_and_reason(
+    client: OpenFeatureClient,
+    evaluation_details: FlagEvaluationDetails,
+    variant: str,
+    reason: str,
+):
+    assert_equal(evaluation_details.reason, Reason[reason])
+    assert_equal(evaluation_details.variant, variant)
+
+
+@then(
+    parsers.cfparse(
+        "the default string value should be returned",
+    ),
+    target_fixture="evaluation_details",
+)
+def assert_default_string(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+) -> FlagEvaluationDetails[str]:
+    key, default = key_and_default
+    evaluation_result = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, default)
+    return evaluation_result
+
+
+@then(
+    parsers.cfparse(
+        "the default integer value should be returned",
+    ),
+    target_fixture="evaluation_details",
+)
+def assert_default_integer(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+) -> FlagEvaluationDetails[int]:
+    key, default = key_and_default
+    evaluation_result = client.get_integer_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, default)
+    return evaluation_result
+
+
+@then(
+    parsers.cfparse(
+        'the reason should indicate an error and the error code should indicate a missing flag with "{error}"',
+    )
+)
+@then(
+    parsers.cfparse(
+        'the reason should indicate an error and the error code should indicate a type mismatch with "{error}"',
+    )
+)
+def assert_for_error(
+    client: OpenFeatureClient,
+    evaluation_details: FlagEvaluationDetails,
+    error: str,
+):
+    assert_equal(evaluation_details.error_code, ErrorCode[error])
+    assert_equal(evaluation_details.reason, Reason.ERROR)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string details value should be "{expected_value}", the variant should be "{variant}", and the reason should be "{reason}"',
+        extra_types={"bool": to_bool},
+    )
+)
+def assert_string_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: str,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(parsers.cfparse('the returned reason should be "{reason}"'))
+def assert_reason(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    reason: str,
+):
+    """the returned reason should be <reason>."""
+    key, default = key_and_default
+    evaluation_result = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.reason, reason)
+
+
+@pytest.fixture()
+def event_handles() -> list:
+    return []
+
+
+@pytest.fixture()
+def error_handles() -> list:
+    return []
+
+
+@when(
+    parsers.cfparse(
+        "a {event_type:ProviderEvent} handler is added",
+        extra_types={"ProviderEvent": ProviderEvent},
+    ),
+)
+def add_event_handler(
+    client: OpenFeatureClient, event_type: ProviderEvent, event_handles: list
+):
+    def handler(event):
+        logging.debug((event_type, event))
+        event_handles.append(
+            {
+                "type": event_type,
+                "event": event,
+            }
+        )
+
+    client.add_handler(event_type, handler)
+
+
+@when(
+    parsers.cfparse(
+        "a {event_type:ProviderEvent} handler and a {event_type2:ProviderEvent} handler are added",
+        extra_types={"ProviderEvent": ProviderEvent},
+    )
+)
+def add_event_handlers(
+    client: OpenFeatureClient,
+    event_type: ProviderEvent,
+    event_type2: ProviderEvent,
+    event_handles,
+    error_handles,
+):
+    add_event_handler(client, event_type, event_handles)
+    add_event_handler(client, event_type2, error_handles)
+
+
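+# Provider events arrive on background threads, so the assertions below poll
+# the collected handles with a timeout instead of checking once.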
+def assert_handlers(
+    handles, event_type: ProviderEvent, max_wait: int = 2, num_events: int = 1
+):
+    poll_interval = 1
+    while max_wait > 0:
+        if sum([h["type"] == event_type for h in handles]) < num_events:
+            max_wait -= poll_interval
+            time.sleep(poll_interval)
+            continue
+        break
+
+    logging.info(f"asserting num({event_type}) >= {num_events}: {handles}")
+    actual_num_events = sum([h["type"] == event_type for h in handles])
+    assert (
+        num_events <= actual_num_events
+    ), f"Expected {num_events} but got {actual_num_events}: {handles}"
+
+
+@then(
+    parsers.cfparse(
+        "the {event_type:ProviderEvent} handler must run",
+        extra_types={"ProviderEvent": ProviderEvent},
+    )
+)
+@then(
+    parsers.cfparse(
+        "the {event_type:ProviderEvent} handler must run when the provider connects",
+        extra_types={"ProviderEvent": ProviderEvent},
+    )
+)
+def assert_handler_run(event_type: ProviderEvent, event_handles):
+    assert_handlers(event_handles, event_type, max_wait=6)
+
+
+@then(
+    parsers.cfparse(
+        "the {event_type:ProviderEvent} handler must run when the provider's connection is lost",
+        extra_types={"ProviderEvent": ProviderEvent},
+    )
+)
+def assert_disconnect_handler(error_handles, event_type: ProviderEvent):
+    # docker sync upstream restarts every 5s, waiting 2 cycles reduces test noise
+    assert_handlers(error_handles, event_type, max_wait=30)
+
+
+@when(
+    parsers.cfparse('a flag with key "{flag_key}" is modified'),
+    target_fixture="changed_flag",
+)
+def changed_flag(
+    flag_key: str,
+):
+    return flag_key
+
+
+@then(
+    parsers.cfparse(
+        "when the connection is reestablished the {event_type:ProviderEvent} handler must run again",
+        extra_types={"ProviderEvent": ProviderEvent},
+    )
+)
+def assert_disconnect_error(
+    client: OpenFeatureClient, event_type: ProviderEvent, event_handles: list
+):
+    assert_handlers(event_handles, event_type, max_wait=30, num_events=2)
+
+
+@then(parsers.cfparse('the event details must indicate "{key}" was altered'))
+def assert_flag_changed(event_handles, key):
+    handle = None
+    for h in event_handles:
+        if h["type"] == ProviderEvent.PROVIDER_CONFIGURATION_CHANGED:
+            handle = h
+            break
+
+    assert handle is not None
+    assert key in handle["event"].flags_changed
+
+
+def wait_for(pred, poll_sec=2, timeout_sec=10):
+    start = time.time()
+    while not (ok := pred()) and (time.time() - start < timeout_sec):
+        time.sleep(poll_sec)
+    assert_true(pred())
+    return ok
+
+
+@given("flagd is unavailable", target_fixture="client")
+def flagd_unavailable():
+    api.set_provider(
+        FlagdProvider(
+            resolver_type=ResolverType.IN_PROCESS,
+            port=99999,
+        ),
+        "unavailable",
+    )
+    return api.get_client("unavailable")
+
+
+@when("a flagd provider is set and initialization is awaited")
+def flagd_init(client: OpenFeatureClient, event_handles, error_handles):
+    add_event_handler(client, ProviderEvent.PROVIDER_ERROR, error_handles)
+    add_event_handler(client, ProviderEvent.PROVIDER_READY, event_handles)
+
+
+@then("an error should be indicated within the configured deadline")
+def flagd_error(error_handles):
+    assert_handlers(error_handles, ProviderEvent.PROVIDER_ERROR)
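
wait_for gives the suite one generic readiness primitive: poll a predicate, then assert it. setup_provider uses it to block until the provider reports READY; the same helper can guard any eventually-consistent check, for example (the flag key here is hypothetical):

    wait_for(lambda: client.get_boolean_value("ready-flag", False))
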
return "ghcr.io/open-feature/flagd-testbed:v0.5.13" + + +@pytest.mark.skip(reason="0 float might be a int") +@scenario("../../test-harness/gherkin/flagd.feature", "Resolves float zero value") +def test_flag_change_event(): + """not implemented""" + + +scenarios( + "../../test-harness/gherkin/flagd.feature", + "../../test-harness/gherkin/flagd-json-evaluator.feature", + "../../spec/specification/assets/gherkin/evaluation.feature", +) diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_file.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_file.py new file mode 100644 index 00000000..d980a72d --- /dev/null +++ b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_file.py @@ -0,0 +1,83 @@ +import json +import os +import tempfile +from os import listdir + +import pytest +import yaml +from pytest_bdd import given, scenario, scenarios + +from openfeature import api +from openfeature.client import OpenFeatureClient +from openfeature.contrib.provider.flagd import FlagdProvider +from openfeature.contrib.provider.flagd.config import ResolverType +from openfeature.provider import ProviderStatus +from tests.e2e.steps import wait_for + +KEY_EVALUATORS = "$evaluators" + +KEY_FLAGS = "flags" + +MERGED_FILE = "merged_file" + + +@pytest.fixture(params=["json", "yaml"], autouse=True) +def file_name(request): + extension = request.param + result = {KEY_FLAGS: {}, KEY_EVALUATORS: {}} + + path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../test-harness/flags/") + ) + + for f in listdir(path): + with open(path + "/" + f, "rb") as infile: + loaded_json = json.load(infile) + result[KEY_FLAGS] = {**result[KEY_FLAGS], **loaded_json[KEY_FLAGS]} + if loaded_json.get(KEY_EVALUATORS): + result[KEY_EVALUATORS] = { + **result[KEY_EVALUATORS], + **loaded_json[KEY_EVALUATORS], + } + + with tempfile.NamedTemporaryFile( + "w", delete=False, suffix="." 
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_file.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_file.py
new file mode 100644
index 00000000..d980a72d
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_file.py
@@ -0,0 +1,83 @@
+import json
+import os
+import tempfile
+from os import listdir
+
+import pytest
+import yaml
+from pytest_bdd import given, scenario, scenarios
+
+from openfeature import api
+from openfeature.client import OpenFeatureClient
+from openfeature.contrib.provider.flagd import FlagdProvider
+from openfeature.contrib.provider.flagd.config import ResolverType
+from openfeature.provider import ProviderStatus
+from tests.e2e.steps import wait_for
+
+KEY_EVALUATORS = "$evaluators"
+
+KEY_FLAGS = "flags"
+
+MERGED_FILE = "merged_file"
+
+
+@pytest.fixture(params=["json", "yaml"], autouse=True)
+def file_name(request):
+    extension = request.param
+    result = {KEY_FLAGS: {}, KEY_EVALUATORS: {}}
+
+    path = os.path.abspath(
+        os.path.join(os.path.dirname(__file__), "../../test-harness/flags/")
+    )
+
+    for f in listdir(path):
+        with open(path + "/" + f, "rb") as infile:
+            loaded_json = json.load(infile)
+            result[KEY_FLAGS] = {**result[KEY_FLAGS], **loaded_json[KEY_FLAGS]}
+            if loaded_json.get(KEY_EVALUATORS):
+                result[KEY_EVALUATORS] = {
+                    **result[KEY_EVALUATORS],
+                    **loaded_json[KEY_EVALUATORS],
+                }
+
+    with tempfile.NamedTemporaryFile(
+        "w", delete=False, suffix="." + extension
+    ) as outfile:
+        if extension == "json":
+            json.dump(result, outfile)
+        else:
+            yaml.dump(result, outfile)
+
+    return outfile
+
+
+@pytest.fixture(autouse=True, scope="module")
+def setup(request):
+    pass
+
+
+@given("a flagd provider is set", target_fixture="client")
+@given("a provider is registered", target_fixture="client")
+def setup_provider(setup, file_name) -> OpenFeatureClient:
+    api.set_provider(
+        FlagdProvider(
+            resolver_type=ResolverType.IN_PROCESS,
+            offline_flag_source_path=file_name.name,
+        )
+    )
+    client = api.get_client()
+    wait_for(lambda: client.get_provider_status() == ProviderStatus.READY)
+    return client
+
+
+@pytest.mark.skip(reason="Eventing not implemented")
+@scenario("../../test-harness/gherkin/flagd.feature", "Flag change event")
+def test_flag_change_event():
+    """not implemented"""
+
+
+scenarios(
+    "../../test-harness/gherkin/flagd.feature",
+    "../../test-harness/gherkin/flagd-json-evaluator.feature",
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+)
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_reconnect.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_reconnect.py
new file mode 100644
index 00000000..18fa1767
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_reconnect.py
@@ -0,0 +1,35 @@
+import pytest
+from pytest_bdd import scenarios
+
+from openfeature.contrib.provider.flagd.config import ResolverType
+
+
+@pytest.fixture(autouse=True, scope="module")
+def client_name() -> str:
+    return "in-process-reconnect"
+
+
+@pytest.fixture(autouse=True, scope="module")
+def resolver_type() -> ResolverType:
+    return ResolverType.IN_PROCESS
+
+
+@pytest.fixture(autouse=True, scope="module")
+def port():
+    return 8015
+
+
+@pytest.fixture(autouse=True, scope="module")
+def image():
+    return "ghcr.io/open-feature/flagd-testbed-unstable:v0.5.13"
+
+
+# @pytest.mark.skip(reason="Reconnect seems to be flaky")
+# @scenario("../../test-harness/gherkin/flagd-reconnect.feature", "Provider reconnection")
+# def test_flag_change_event():
+#     """not implemented"""
+
+
+scenarios(
+    "../../test-harness/gherkin/flagd-reconnect.feature",
+)
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_rpc.py b/providers/openfeature-provider-flagd/tests/e2e/test_rpc.py
new file mode 100644
index 00000000..d0a0548e
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/test_rpc.py
@@ -0,0 +1,55 @@
+import pytest
+from pytest_bdd import scenario, scenarios
+
+from openfeature.contrib.provider.flagd.config import ResolverType
+
+
+@pytest.fixture(autouse=True, scope="module")
+def client_name() -> str:
+    return "rpc"
+
+
+@pytest.fixture(autouse=True, scope="module")
+def resolver_type() -> ResolverType:
+    return ResolverType.GRPC
+
+
+@pytest.fixture(autouse=True, scope="module")
+def port():
+    return 8013
+
+
+@pytest.fixture(autouse=True, scope="module")
+def image():
+    return "ghcr.io/open-feature/flagd-testbed:v0.5.13"
+
+
+@pytest.mark.skip(reason="Eventing not implemented")
+@scenario("../../test-harness/gherkin/flagd.feature", "Flag change event")
+def test_flag_change_event():
+    """not implemented"""
+
+
+@pytest.mark.skip(reason="issue #102")
+@scenario(
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+    "Resolves object value",
+)
+def test_resolves_object_value():
+    """not implemented"""
+
+
+@pytest.mark.skip(reason="issue #102")
+@scenario(
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+    "Resolves object details",
+)
+def test_resolves_object_details():
+    """not implemented"""
+
+
+scenarios(
+    "../../test-harness/gherkin/flagd.feature",
+    "../../test-harness/gherkin/flagd-json-evaluator.feature",
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+)
diff --git a/providers/openfeature-provider-flagd/tests/test_errors.py b/providers/openfeature-provider-flagd/tests/test_errors.py
index 39ed91e2..3d234baa 100644
--- a/providers/openfeature-provider-flagd/tests/test_errors.py
+++ b/providers/openfeature-provider-flagd/tests/test_errors.py
@@ -1,3 +1,4 @@
+import os
 import time

 import pytest
@@ -55,10 +56,11 @@ def test_file_load_errors(file_name: str):
     ],
 )
 def test_json_logic_parse_errors(file_name: str):
+    path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./flags/"))
     client = create_client(
         FlagdProvider(
             resolver_type=ResolverType.IN_PROCESS,
-            offline_flag_source_path=f"tests/flags/{file_name}",
+            offline_flag_source_path=f"{path}/{file_name}",
         )
     )

@@ -69,10 +71,11 @@ def test_json_logic_parse_errors(file_name: str):


 def test_flag_disabled():
+    path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./flags/"))
     client = create_client(
         FlagdProvider(
             resolver_type=ResolverType.IN_PROCESS,
-            offline_flag_source_path="tests/flags/basic-flag-disabled.json",
+            offline_flag_source_path=f"{path}/basic-flag-disabled.json",
         )
     )
diff --git a/providers/openfeature-provider-flagd/tests/test_file_store.py b/providers/openfeature-provider-flagd/tests/test_file_store.py
index 12a1d976..0a152419 100644
--- a/providers/openfeature-provider-flagd/tests/test_file_store.py
+++ b/providers/openfeature-provider-flagd/tests/test_file_store.py
@@ -1,3 +1,4 @@
+import os
 from unittest.mock import Mock

 import pytest
@@ -27,8 +28,9 @@ def test_file_load(file_name: str):
     emit_provider_ready = Mock()
     emit_provider_error = Mock()
     flag_store = FlagStore(emit_provider_configuration_changed)
+    path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./flags/"))
     file_watcher = FileWatcher(
-        f"tests/flags/{file_name}", flag_store, emit_provider_ready, emit_provider_error
+        f"{path}/{file_name}", flag_store, emit_provider_ready, emit_provider_error
     )

     file_watcher.initialize(None)
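
The test_errors.py and test_file_store.py hunks share one idea: anchor fixture paths to the test file instead of the process working directory, so pytest can be invoked from anywhere. The idiom in isolation:

    import os

    # Resolve tests/flags relative to this file, not the CWD.
    path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./flags/"))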