diff --git a/.github/workflows/run-end-to-end.yml b/.github/workflows/run-end-to-end.yml index 630a1f56b0..f4fcd97ef5 100644 --- a/.github/workflows/run-end-to-end.yml +++ b/.github/workflows/run-end-to-end.yml @@ -242,6 +242,9 @@ jobs: - name: Run APPSEC_RASP scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_RASP"') run: ./run.sh APPSEC_RASP + - name: Run APPSEC_META_STRUCT_DISABLED scenario + if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_META_STRUCT_DISABLED"') + run: ./run.sh APPSEC_META_STRUCT_DISABLED - name: Run SAMPLING scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"SAMPLING"') run: ./run.sh SAMPLING diff --git a/.github/workflows/run-lib-injection.yml b/.github/workflows/run-lib-injection.yml index 9b44d08997..1c10fcb72f 100644 --- a/.github/workflows/run-lib-injection.yml +++ b/.github/workflows/run-lib-injection.yml @@ -28,6 +28,7 @@ jobs: matrix: ${{ steps.compute-matrix.outputs.matrix }} matrix_supported_langs: ${{ steps.compute-matrix.outputs.matrix_supported_langs }} matrix_profiling_supported: ${{ steps.compute-matrix.outputs.matrix_profiling_supported }} + matrix_skip_basic: ${{ steps.compute-matrix.outputs.matrix_skip_basic }} init_image: ${{ steps.compute-matrix.outputs.init_image }} steps: - name: Compute matrix @@ -41,7 +42,9 @@ jobs: "cpp": [], "dotnet": [{"name":"dd-lib-dotnet-init-test-app","supported":"true"}], "golang": [], - "java": [{"name":"dd-lib-java-init-test-app","supported":"true"},{"name":"jdk7-app","supported":"false"}], + "java": [{"name":"dd-lib-java-init-test-app","supported":"true"}, + {"name":"jdk7-app","supported":"false"}, + {"name":"dd-djm-spark-test-app", "supported":"true", "skip-profiling":"true", "skip-basic":"true"}], "nodejs": [{"name":"sample-app","supported":"true"},{"name":"sample-app-node13","supported":"false"}], "php": [], "python": [{"name":"dd-lib-python-init-test-django","supported":"true"}, @@ -80,11 +83,14 @@ jobs: #Only supported weblog variants results_supported_langs = [] results_profiling_supported = [] + results_skip_basic = [] for weblog in weblogs["${{ inputs.library }}"]: if weblog["supported"] == "true": results_supported_langs.append(weblog["name"]) if "skip-profiling" not in weblog or weblog["skip-profiling"] != "true": results_profiling_supported.append(weblog["name"]) + if "skip-basic" in weblog and weblog["skip-basic"] == "true": + results_skip_basic.append(weblog["name"]) #Use the latest init image for prod version, latest_snapshot init image for dev version if "${{ inputs.version }}" == 'prod': @@ -97,11 +103,13 @@ jobs: print(f'init_image={json.dumps(result_init_image)}', file=fh) print(f'matrix_supported_langs={json.dumps(results_supported_langs)}', file=fh) print(f'matrix_profiling_supported={json.dumps(results_profiling_supported)}', file=fh) + print(f'matrix_skip_basic={json.dumps(results_skip_basic)}', file=fh) print(json.dumps(result, indent=2)) print(json.dumps(result_init_image, indent=2)) print(json.dumps(results_supported_langs, indent=2)) print(json.dumps(results_profiling_supported, indent=2)) + print(json.dumps(results_skip_basic, indent=2)) lib-injection-init-image-validator: if: inputs.library == 'dotnet' || inputs.library == 'java' || inputs.library == 'python' || inputs.library == 'ruby' || inputs.library == 'nodejs' @@ -116,6 +124,8 @@ jobs: matrix: weblog: ${{ fromJson(needs.compute-matrix.outputs.matrix) }} lib_init_image: ${{ 
fromJson(needs.compute-matrix.outputs.init_image) }} + exclude: + - weblog: {"name":"dd-djm-spark-test-app", "supported":"true", "skip-profiling":"true", "skip-basic":"true"} fail-fast: false env: TEST_LIBRARY: ${{ inputs.library }} @@ -183,7 +193,7 @@ jobs: matrix: weblog: ${{ fromJson(needs.compute-matrix.outputs.matrix_supported_langs) }} lib_init_image: ${{ fromJson(needs.compute-matrix.outputs.init_image) }} - cluster_agent_version: ['7.56.2', '7.57.0'] + cluster_agent_version: ['7.56.2', '7.57.0', '7.59.0'] fail-fast: false env: TEST_LIBRARY: ${{ inputs.library }} @@ -231,6 +241,7 @@ jobs: - name: Kubernetes lib-injection tests id: k8s-lib-injection-tests + if: ${{ !contains(fromJson(needs.compute-matrix.outputs.matrix_skip_basic), matrix.weblog) }} run: ./run.sh K8S_LIBRARY_INJECTION_BASIC - name: Kubernetes lib-injection profiling tests @@ -238,6 +249,11 @@ if: ${{ contains(fromJson(needs.compute-matrix.outputs.matrix_profiling_supported), matrix.weblog) }} run: ./run.sh K8S_LIBRARY_INJECTION_PROFILING + - name: Kubernetes lib-injection DJM tests + id: k8s-lib-injection-tests-djm + if: ${{ matrix.weblog == 'dd-djm-spark-test-app' }} + run: ./run.sh K8S_LIBRARY_INJECTION_DJM + - name: Compress logs id: compress_logs if: always() && steps.build.outcome == 'success' diff --git a/docs/edit/README.md b/docs/edit/README.md index 8495c096f1..d1976cf8f8 100644 --- a/docs/edit/README.md +++ b/docs/edit/README.md @@ -8,6 +8,10 @@ System tests allow developers define scenarios and ensure datadog libraries prod To make changes, you must be able to run tests locally. Instructions for running **end-to-end** tests can be found [here](https://github.com/DataDog/system-tests/blob/main/docs/execute/README.md#run-tests) and for **parametric**, [here](https://github.com/DataDog/system-tests/blob/main/docs/scenarios/parametric.md#running-the-tests). +**Note** + +For information on contributing specifically to the **parametric** scenario, see [here](/docs/scenarios/parametric_contributing.md). + **Callout** You'll commonly need to run unmerged changes to your library against system tests (e.g. to ensure the feature is up to spec). Instructions for testing against unmerged changes can be found in [enable-test.md](./enable-test.md). diff --git a/docs/edit/add-new-test.md b/docs/edit/add-new-test.md index dad6766868..0a1b8a60a7 100644 --- a/docs/edit/add-new-test.md +++ b/docs/edit/add-new-test.md @@ -1,4 +1,4 @@ -Whether it's adding a new test or modifying an existing test, a moderate amount of effort will be required. The instructions below cater to end-to-end tests, refer to [placeholder] (TODO: LINK to parametric_contributing.md) for parametric-specific instructions. +Whether it's adding a new test or modifying an existing test, a moderate amount of effort will be required. The instructions below cater to end-to-end tests; refer to [the parametric contributing doc](/docs/scenarios/parametric_contributing.md) for parametric-specific instructions. Once the changes are complete, post them in a PR. diff --git a/docs/edit/enable-test.md b/docs/edit/enable-test.md index 42fb509f8b..406a231a7f 100644 --- a/docs/edit/enable-test.md +++ b/docs/edit/enable-test.md @@ -2,7 +2,7 @@ So, you have a branch that contains changes you'd like to test with system tests...
-**Note**: the instructions below assume that the necessary test already exists in system-tests and your weblog or parametric app has the necessary endpoint for serving the test [TODO]: LINK TO CONTRIBUTING DOC +**Note**: the instructions below assume that the necessary test already exists in system-tests and your weblog or parametric app has the necessary endpoint for serving the test. 1. Post a PR to the dd-trace repo if you have not already. diff --git a/docs/edit/features.md b/docs/edit/features.md index 9b8ecf30ed..a5f1053da7 100644 --- a/docs/edit/features.md +++ b/docs/edit/features.md @@ -1,4 +1,4 @@ -System tests are feature-oriented; put another way, tests certify which features are supported in each client library (and the supported library versions). Each test class must belong to a "feature", where "features" map to entries in the [Feature Parity Dashboard](https://feature-parity.us1.prod.dog/). We use the @features decorators to achieve this. +System tests are feature-oriented; put another way, tests certify which features are supported in each client library (and the supported library versions). Each test class must belong to a "feature", where "features" map to entries in the [Feature Parity Dashboard](https://feature-parity.us1.prod.dog/). We use the `@features` decorators to achieve this. For example, you have a new feature called `Awesome feature`, which is part of a meta feature called `stuffs`. We add a new file called `tests/test_stuffs.py` and add a test class with some boilerplate code, and a basic test: @@ -12,3 +12,28 @@ class Test_AwesomeFeature: def test_basic(self) assert P==NP ``` + +Several key points: + +* Each new feature should be defined in [_features.py](/utils/_features.py). This consists of adding the feature in the [Feature Parity Dashboard](https://feature-parity.us1.prod.dog/), getting its feature id, then copying one of the already-added features and changing the name, the feature id in the url, and the feature number. In this case we'd add: + +```python + + @staticmethod + def awesome_feature(test_object): + """ + Awesome Feature for Awesomeness + + https://feature-parity.us1.prod.dog/#/?feature=291 + """ + pytest.mark.features(feature_id=291)(test_object) + return test_object +``` + +* One class tests one feature +* One class can have several tests +* Files can be nested (`tests/test_product/test_stuffs.py::Test_AwesomeFeature`), and how files are organized does not make any difference. Use your common sense, or ask on [slack](https://dd.enterprise.slack.com/archives/C025TJ4RZ8X). + +## Skip tests + +See [skip-tests.md](/docs/edit/skip-tests.md) \ No newline at end of file diff --git a/docs/scenarios/parametric.md b/docs/scenarios/parametric.md index 78de051b09..94fa163b4a 100644 --- a/docs/scenarios/parametric.md +++ b/docs/scenarios/parametric.md @@ -50,10 +50,10 @@ def test_datadog_spans(library_env, test_library, test_agent): ``` - This test case runs against all the APM libraries and is parameterized with two different environments specifying two different values of the environment variable `DD_ENV`. -- The test case creates a new span and sets a tag on it using the shared GRPC/HTTP interface. -- The implementations of the GRPC/HTTP interface, by language, are in `utils/build/docker//parametric`. +- `test_library.dd_start_span` creates a new span using the shared HTTP interface. +- The request is sent to an HTTP server for each language. Implementations can be found in `utils/build/docker//parametric`.
More information in [HTTP Server Implementations](#http-server-implementations). - Data is flushed to the test agent after the with test_library block closes. -- Data is retrieved using the `test_agent` fixture and asserted on. +- Data (usually traces) are retrieved using the `test_agent` fixture and we assert that they look the way we'd expect. ## Usage @@ -93,7 +93,7 @@ TEST_LIBRARY=dotnet ./run.sh PARAMETRIC -k test_metrics_ Tests can be aborted using CTRL-C but note that containers maybe still be running and will have to be shut down. ### Running the tests for a custom tracer -To run tests against custom tracers, refer to the [Binaries Documentation](../execute/binaries.md) +To run tests against custom tracer builds, refer to the [Binaries Documentation](../execute/binaries.md) #### After Testing with a Custom Tracer: Note: Most of the ways to run system-tests with a custom tracer version involve modifying the binaries directory. Modifying the binaries will alter the tracer version used across your local computer. Once you're done testing with the custom tracer, ensure you **remove** it. For example for Python: @@ -199,19 +199,23 @@ See the steps below in the HTTP section to run the Python server and view the sp ### Shared Interface To view the available HTTP endpoints , follow these steps: +Note: These are based on the Python tracer's HTTP server, which should be treated as the standard reference interface across implementations. + 1. `./utils/scripts/parametric/run_reference_http.sh` 2. Navigate to http://localhost:8000/docs in your web browser to access the documentation. 3. You can download the OpenAPI schema from http://localhost:8000/openapi.json. This schema can be imported into tools like [Postman](https://learning.postman.com/docs/integrations/available-integrations/working-with-openAPI/) or other API clients to facilitate development and testing. +Not all endpoint implementations per language are up to spec with regard to their parameters and return values. To view endpoints that are not up to spec, see the [feature parity board](https://feature-parity.us1.prod.dog/#/?runDateFilter=7d&feature=339). + ### Architecture: How System-tests work Below is an overview of how the testing architecture is structured: - Shared Tests in Python: We write shared test cases using Python's pytest framework. These tests are designed to be generic and interact with the tracers through an HTTP interface. -- HTTP Servers in Docker: For each language tracer, we build and run an HTTP server within a Docker container. These servers expose the required endpoints defined in the OpenAPI schema and handle the tracer-specific logic. +- [HTTP Servers in Docker](#http-server-implementations): For each language tracer, we build and run an HTTP server within a Docker container. These servers expose the required endpoints defined in the OpenAPI schema and handle the tracer-specific logic. - [Test Agent](https://github.com/DataDog/dd-apm-test-agent/) in Docker: We start a test agent in a separate Docker container. This agent collects data (such as spans and traces) submitted by the HTTP servers. It serves as a centralized point for aggregating and accessing test data. -- Test Execution: The Python test cases use an HTTP client to communicate with the servers. The servers generate data based on the interactions, which is then sent to the test agent. The tests can query the test agent to retrieve data (usually traces) and perform assertions to verify correct behavior.
+- Test Execution: The Python test cases use an [HTTP client](/utils/parametric/_library_client.py) to communicate with the servers. The servers generate data based on the interactions, which is then sent to the test agent. The tests can query the test agent to retrieve data (often traces) and perform assertions to verify correct behavior. An example of how to get a span from the test agent: ```python span = find_only_span(test_agent.wait_for_num_traces(1)) ``` This architecture allows us to ensure that all tracers conform to the same interface and behavior, making it easier to maintain consistency across different languages and implementations. +#### HTTP Server Implementations + +The HTTP server implementations for each tracer can be found at the following locations: +*Note:* For some languages there is both an Otel and a Datadog server. This is simply to separate the available Otel endpoints from the available Datadog endpoints that can be hit by the client. If a language only has a single server, then both the Otel and Datadog endpoints exist there. + +* [Python](/utils/build/docker/python/parametric/apm_test_client/server.py) +* [Ruby](utils/build/docker/ruby/parametric/server.rb) +* [Php](utils/build/docker/php/parametric/server.php) +* [Nodejs](utils/build/docker/nodejs/parametric/server.js) +* [Java Datadog](utils/build/docker/java/parametric/src/main/java/com/datadoghq/trace/opentracing/controller/OpenTracingController.java) +* [Java Otel](utils/build/docker/java/parametric/src/main/java/com/datadoghq/trace/opentelemetry/controller/OpenTelemetryController.java) +* [Dotnet Datadog](utils/build/docker/dotnet/parametric/Endpoints/ApmTestApi.cs) +* [Dotnet Otel](utils/build/docker/dotnet/parametric/Endpoints/ApmTestApiOtel.cs) +* [Go Datadog](utils/build/docker/golang/parametric/main.go) +* [Go Otel](utils/build/docker/golang/parametric/otel.go) + + ![image](https://github.com/user-attachments/assets/fc144fc1-95aa-4d50-97c5-cda8fdbcefef) image diff --git a/docs/scenarios/parametric_contributing.md b/docs/scenarios/parametric_contributing.md new file mode 100644 index 0000000000..012b22c0e3 --- /dev/null +++ b/docs/scenarios/parametric_contributing.md @@ -0,0 +1,68 @@ +# Contributing to Parametric System-tests + +Note: a more in-depth overview of parametric system-tests can be found in [parametric.md](parametric.md). + +**MUST:** Acquaint yourself with [how system tests work](parametric.md#architecture-how-system-tests-work) before proceeding. + +## Use cases + +Let's figure out if your feature is a good candidate to be tested with parametric system-tests. + +System-tests in general are great for ensuring uniform behavior between different dd-trace repos (tracing, ASM, DI, profiling, etc.). There are two types of system-tests: [end-to-end](/docs/README.md) and [parametric](/docs/scenarios/parametric.md). + +Parametric tests in the Datadog system test repository validate the behavior of APM Client Libraries by interacting only with their public interfaces. These tests ensure the telemetry generated (spans, metrics, instrumentation telemetry) is consistent and accurate when libraries handle different input parameters (e.g., calling a Tracer's startSpan method with a specific type) and configurations (e.g., sampling rates, distributed tracing header formats, remote settings). They run against web applications written in Ruby, Java, Go, Python, PHP, Node.js, C++, and .NET, which expose endpoints simulating real-world ddtrace usage.
The generated telemetry is sent to a Datadog agent, queried, and verified by system tests to confirm proper library functionality across scenarios. + +If your usage does not require different parameter values, then [end-to-end system-tests](/docs/README.md) should be used as they will achieve the same level of behavior-uniformity verification and test the feature on real-world use cases, catching more issues. End-to-end tests are also what should be used to verify behavior across tracer integrations. +For more on the differences between end-to-end and parametric tests, see [here](/docs/scenarios/README.md#scenarios). +System-tests are **not** for testing internal or niche library behavior. Unit tests are a better fit for that case. + +## Getting set up + +We usually add new system tests when validating a new feature. To begin, set up the system-tests repo to run with a version of the library that has already implemented the feature you'd like to test (published or on a branch). +Follow the [Binaries Documentation](../execute/binaries.md) for your particular tracer language to set this up. + +[Verify that you can run some (any) parametric tests with your custom tracer](parametric.md#running-the-tests). Make sure some pass; there is no need to run the whole suite (you can stop the tests from running with `ctrl+c`). If you have any issues, check out the [debugging section](parametric.md#debugging) to troubleshoot. + +## Writing the tests + +Now that we're all set up with a working test suite and a tracer with the implemented feature, we can begin writing the new tests. + +**MUST:** If you haven't yet, please acquaint yourself with [how system tests work](parametric.md#architecture-how-system-tests-work) before proceeding and reference it throughout this section. + +Before writing a new test, check the [existing tests](/tests/parametric) to see if you can use the same methods or endpoints for similar scenarios; in many cases, new endpoints do not need to be added. + +For a list of client methods that already exist, refer to `class APMLibrary` in [_library_client.py](/utils/parametric/_library_client.py). If you're wondering what the methods do, you can take a look at the respective endpoints they're calling in that same file, in `class APMLibraryClient`. + +The endpoints (where the actual tracer code runs) are defined in the HTTP server implementations for each tracer [listed here](parametric.md#http-server-implementations). Click on the one for your language to take a look at the endpoints. In some cases you may only need to slightly modify an existing endpoint rather than add a new one. + +### If you need to add additional endpoints to test your new feature + +*Note:* please refer to the [architecture section](parametric.md#architecture-how-system-tests-work) if you get confused at any point in this process. + +We then need to do the following: + +* Determine what you want the endpoint to be called and what you need it to do, and add it to your tracer's HTTP server. + +*Note:* If adding a new endpoint, please let a Python tracer implementer know so they can add it as well ([see the shared interface](parametric.md#shared-interface)). +*Note*: Only add new endpoints that operate on the public API and execute ONE operation. Endpoints that execute complex operations or validate tracer internals will not be accepted. +* In [_library_client.py](/utils/parametric/_library_client.py), add both the endpoint call in `class APMLibraryClient` and the method that invokes it in `class APMLibrary`. Use other implementations for reference.
+ +* OK, we now have our new method! Use it in the tests you write, following the [section below](#if-the-methods-you-need-to-run-your-tests-are-already-written). + +### If the methods you need to run your tests are already written + +If it makes sense to add your tests to a file that already exists, great! Otherwise, make a new test file in `tests/parametric`. + +Next, copy the testing code you want to use as a base/guideline (usually the class, if using a new file, and one of its test methods). + +Then: + +* [Change the name of the feature annotation it'll fit under for the feature parity board](/docs/edit/features.md) (not always needed; e.g. `@features.datadog_headers_propagation` is used for all the propagation features) +* Change the class and method name to fit what you're testing. +* [Change your tracer's respective manifest.yml file](/docs/edit/manifest.md) or else the script won't know to run your new test. If you're unsure how to do this properly, search the manifest file for the file you copied the test from and see how it's specified; you can probably copy that for your new file (make sure the path is the same). +For the version value, to make sure your test runs, specify the current release your tracer is on. This is the minimum value that the script will run your test with. If you make it too high, the script will skip your test. +* Write the test, drawing on examples from other tests. Remember you're almost always following the pattern of making spans, getting them from the test agent, and then verifying values on them. + +**Finally:** +[Try running your test!](parametric.md#running-the-tests) +If you have an issue, check out the [debugging section](parametric.md#debugging) to troubleshoot. diff --git a/lib-injection/build/docker/java/dd-djm-spark-test-app/Dockerfile b/lib-injection/build/docker/java/dd-djm-spark-test-app/Dockerfile new file mode 100644 index 0000000000..96cc099bd5 --- /dev/null +++ b/lib-injection/build/docker/java/dd-djm-spark-test-app/Dockerfile @@ -0,0 +1,11 @@ +FROM apache/spark:3.4.4 + +WORKDIR /opt/spark/work-dir + +USER root +COPY launch.sh /opt/spark/work-dir/launch.sh +RUN chown spark:spark /opt/spark/work-dir/launch.sh +RUN chmod +x /opt/spark/work-dir/launch.sh +USER spark + +CMD ["/opt/spark/work-dir/launch.sh"] diff --git a/lib-injection/build/docker/java/dd-djm-spark-test-app/launch.sh b/lib-injection/build/docker/java/dd-djm-spark-test-app/launch.sh new file mode 100644 index 0000000000..5208dfbae5 --- /dev/null +++ b/lib-injection/build/docker/java/dd-djm-spark-test-app/launch.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ -z "${LIB_INIT_IMAGE}" ]; then + echo "LIB_INIT_IMAGE is not set" + exit 1 +else + echo "LIB_INIT_IMAGE is set to ${LIB_INIT_IMAGE}" +fi + +if [ -z "${AUTO_INJECT_VERSION}" ]; then + echo "AUTO_INJECT_VERSION is not set, defaulting to latest" + AUTO_INJECT_VERSION="latest" # default to latest for now.
+else + echo "AUTO_INJECT_VERSION is set to ${AUTO_INJECT_VERSION}" +fi + +# Submit an example Spark job with DJM enabled +$SPARK_HOME/bin/spark-submit \ + --class org.apache.spark.examples.SparkPi \ + --master k8s://https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT \ + --conf spark.kubernetes.container.image=apache/spark:3.4.4 \ + --deploy-mode cluster \ + --conf spark.kubernetes.namespace=default \ + --conf spark.kubernetes.executor.deleteOnTermination=false \ + --conf spark.kubernetes.driver.label.admission.datadoghq.com/enabled=true \ + --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \ + --conf spark.kubernetes.authenticate.executor.serviceAccountName=spark \ + --conf spark.kubernetes.driver.annotation.admission.datadoghq.com/java-lib.custom-image=${LIB_INIT_IMAGE} \ + --conf spark.kubernetes.driverEnv.DD_APM_INSTRUMENTATION_DEBUG=true \ + --conf spark.kubernetes.driver.annotation.admission.datadoghq.com/apm-inject.version=${AUTO_INJECT_VERSION} \ + --conf spark.driver.extraJavaOptions="-Ddd.integrations.enabled=false -Ddd.data.jobs.enabled=true -Ddd.service=spark-pi-example -Ddd.env=test -Ddd.version=0.1.0 -Ddd.tags=team:djm" \ + --conf spark.kubernetes.driverEnv.HADOOP_HOME=/opt/hadoop/ \ + local:///opt/spark/examples/jars/spark-examples.jar 20 + +# Start a long-running server to keep the web-log up. +python3 -m http.server ${SERVER_PORT:-18080} \ No newline at end of file diff --git a/manifests/cpp.yml b/manifests/cpp.yml index d2a2e98602..f51304501a 100644 --- a/manifests/cpp.yml +++ b/manifests/cpp.yml @@ -80,6 +80,7 @@ tests/: test_identify.py: irrelevant (ASM is not implemented in C++) test_ip_blocking_full_denylist.py: irrelevant (ASM is not implemented in C++) test_logs.py: irrelevant (ASM is not implemented in C++) + test_metastruct.py: irrelevant (ASM is not implemented in C++) test_rate_limiter.py: irrelevant (ASM is not implemented in C++) test_remote_config_rule_changes.py: irrelevant (ASM is not implemented in C++) test_reports.py: irrelevant (ASM is not implemented in C++) diff --git a/manifests/dotnet.yml b/manifests/dotnet.yml index 69ca3a28d9..acf0be0b63 100644 --- a/manifests/dotnet.yml +++ b/manifests/dotnet.yml @@ -306,6 +306,7 @@ tests/: Test_AppSecIPBlockingFullDenylist: v2.16.0 test_logs.py: Test_StandardizationBlockMode: missing_feature + test_metastruct.py: missing_feature test_rate_limiter.py: Test_Main: v2.6.0 test_remote_config_rule_changes.py: diff --git a/manifests/golang.yml b/manifests/golang.yml index fb440f897f..87558c5694 100644 --- a/manifests/golang.yml +++ b/manifests/golang.yml @@ -361,6 +361,11 @@ tests/: test_logs.py: Test_Standardization: missing_feature Test_StandardizationBlockMode: missing_feature + test_metastruct.py: + Test_SecurityEvents_Appsec_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Appsec_Metastruct_Enabled: missing_feature + Test_SecurityEvents_Iast_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Iast_Metastruct_Enabled: missing_feature test_remote_config_rule_changes.py: Test_BlockingActionChangesWithRemoteConfig: v1.69.0-dev Test_UpdateRuleFileWithRemoteConfig: bug (APPSEC-55377) diff --git a/manifests/java.yml b/manifests/java.yml index 68baf0a4fe..923d7c3515 100644 --- a/manifests/java.yml +++ b/manifests/java.yml @@ -1187,6 +1187,7 @@ tests/: akka-http: v1.22.0 spring-boot-3-native: missing_feature (GraalVM.
Tracing support only) Test_StandardizationBlockMode: missing_feature + test_metastruct.py: missing_feature test_rate_limiter.py: Test_Main: akka-http: v1.22.0 diff --git a/manifests/nodejs.yml b/manifests/nodejs.yml index cff475a302..6bdbe5b40e 100644 --- a/manifests/nodejs.yml +++ b/manifests/nodejs.yml @@ -520,6 +520,7 @@ tests/: test_logs.py: Test_Standardization: missing_feature Test_StandardizationBlockMode: missing_feature + test_metastruct.py: missing_feature test_rate_limiter.py: Test_Main: v2.0.0 test_remote_config_rule_changes.py: diff --git a/manifests/php.yml b/manifests/php.yml index e9048119f3..0a1ee45a40 100644 --- a/manifests/php.yml +++ b/manifests/php.yml @@ -228,6 +228,11 @@ tests/: Test_Basic: v0.85.0 test_logs.py: Test_StandardizationBlockMode: missing_feature + test_metastruct.py: + Test_SecurityEvents_Appsec_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Appsec_Metastruct_Enabled: missing_feature + Test_SecurityEvents_Iast_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Iast_Metastruct_Enabled: missing_feature test_remote_config_rule_changes.py: Test_BlockingActionChangesWithRemoteConfig: missing_feature Test_UpdateRuleFileWithRemoteConfig: missing_feature (v0.8.0 but lacks telemetry support) diff --git a/manifests/python.yml b/manifests/python.yml index 34de7a996c..47f23f94fb 100644 --- a/manifests/python.yml +++ b/manifests/python.yml @@ -533,6 +533,11 @@ tests/: test_logs.py: Test_Standardization: missing_feature Test_StandardizationBlockMode: missing_feature + test_metastruct.py: + Test_SecurityEvents_Appsec_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Appsec_Metastruct_Enabled: missing_feature + Test_SecurityEvents_Iast_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Iast_Metastruct_Enabled: missing_feature test_rate_limiter.py: Test_Main: v2.0.0 test_remote_config_rule_changes.py: diff --git a/manifests/ruby.yml b/manifests/ruby.yml index a7cee707ec..44934b94de 100644 --- a/manifests/ruby.yml +++ b/manifests/ruby.yml @@ -260,6 +260,11 @@ tests/: test_logs.py: Test_Standardization: missing_feature Test_StandardizationBlockMode: missing_feature + test_metastruct.py: + Test_SecurityEvents_Appsec_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Appsec_Metastruct_Enabled: missing_feature + Test_SecurityEvents_Iast_Metastruct_Disabled: irrelevant (no fallback will be implemented) + Test_SecurityEvents_Iast_Metastruct_Enabled: missing_feature test_remote_config_rule_changes.py: Test_BlockingActionChangesWithRemoteConfig: missing_feature Test_UpdateRuleFileWithRemoteConfig: missing_feature diff --git a/tests/appsec/iast/utils.py b/tests/appsec/iast/utils.py index d747cc8064..f22bb23cec 100644 --- a/tests/appsec/iast/utils.py +++ b/tests/appsec/iast/utils.py @@ -21,13 +21,14 @@ def _get_span_meta(request): assert spans, "No root span found" span = spans[0] meta = span.get("meta", {}) - return meta + meta_struct = span.get("meta_struct", {}) + return meta, meta_struct def get_iast_event(request): - meta = _get_span_meta(request=request) - assert "_dd.iast.json" in meta, "No _dd.iast.json tag in span" - return meta["_dd.iast.json"] + meta, meta_struct = _get_span_meta(request=request) + assert "_dd.iast.json" in meta or "iast" in meta_struct, "No IAST info found in span" + return meta.get("_dd.iast.json") or meta_struct.get("iast") def assert_iast_vulnerability( @@
-167,8 +168,8 @@ def assert_no_iast_event(request, tested_vulnerability_type=None): for data, _, span in interfaces.library.get_spans(request=request): logger.info(f"Looking for IAST events in {data['log_filename']}") - meta = span.get("meta", {}) - iast_json = meta.get("_dd.iast.json") + meta, meta_struct = _get_span_meta(request=request) + iast_json = meta.get("_dd.iast.json") or meta_struct.get("iast") if iast_json is not None: if tested_vulnerability_type is None: logger.error(json.dumps(iast_json, indent=2)) @@ -453,5 +454,5 @@ def test_cookie_name_filter(self): assert_iast_vulnerability(request=self.req1, vulnerability_count=1, vulnerability_type=self.vulnerability_type) assert_iast_vulnerability(request=self.req2, vulnerability_count=1, vulnerability_type=self.vulnerability_type) - meta_req3 = _get_span_meta(self.req3) - assert "_dd.iast.json" not in meta_req3 + meta, meta_struct = _get_span_meta(self.req3) + assert "_dd.iast.json" not in meta and "iast" not in meta_struct, "No IAST info expected in span" diff --git a/tests/appsec/test_asm_standalone.py b/tests/appsec/test_asm_standalone.py index 22c9e3596d..336183f996 100644 --- a/tests/appsec/test_asm_standalone.py +++ b/tests/appsec/test_asm_standalone.py @@ -64,7 +64,7 @@ def _assert_tags_value(span, obj, expected_tags): def assert_product_is_enabled(request, product): product_enabled = False tags = "_dd.iast.json" if product == "iast" else "_dd.appsec.json" - meta_struct_key = "vulnerability" if product == "iast" else "appsec" + meta_struct_key = "iast" if product == "iast" else "appsec" for data, trace, span in interfaces.library.get_spans(request=request): # Check if the product is enabled in meta meta = span["meta"] diff --git a/tests/appsec/test_metastruct.py b/tests/appsec/test_metastruct.py new file mode 100644 index 0000000000..6792d8b97b --- /dev/null +++ b/tests/appsec/test_metastruct.py @@ -0,0 +1,110 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2021 Datadog, Inc.
+ +from utils import weblog, interfaces, rfc, scenarios, features + + +@rfc("https://docs.google.com/document/d/1iWQsOfT6Lg_IFyvQeqry9wVmXOE2Yav0X4MgOTk7mks") +@features.security_events_metastruct +class Test_SecurityEvents_Appsec_Metastruct_Enabled: + """Test to verify that appsec events are correctly set in meta struct when supported by the agent.""" + + def setup_appsec_event_use_metastruct(self): + self.r = weblog.get("/", headers={"User-Agent": "Arachni/v1"}) + + def test_appsec_event_use_metastruct(self): + spans = [s for _, s in interfaces.library.get_root_spans(request=self.r)] + assert spans + + for span in spans: + meta = span.get("meta", {}) + meta_struct = span.get("meta_struct", {}) + assert meta["appsec.event"] == "true" + assert "_dd.appsec.json" not in meta + assert "appsec" in meta_struct + + # The event is not null + assert meta_struct.get("appsec", {}) not in [None, {}] + + # There is at least one rule triggered + assert len(meta_struct["appsec"].get("triggers", [])) > 0 + + +@features.security_events_metastruct +class Test_SecurityEvents_Iast_Metastruct_Enabled: + """Test to verify that IAST events are correctly set in meta struct when supported by the agent.""" + + def setup_iast_event_use_metastruct(self): + # Triggers a vulnerability + self.r = weblog.get("/set_cookie", params={"name": "metastruct-yes", "value": "yes"}) + + def test_iast_event_use_metastruct(self): + spans = [s for _, s in interfaces.library.get_root_spans(request=self.r)] + assert spans + + for span in spans: + meta = span.get("meta", {}) + meta_struct = span.get("meta_struct", {}) + assert meta["_dd.iast.enabled"] == "1" + assert "_dd.iast.json" not in meta + assert "iast" in meta_struct + + # The event is not null + assert meta_struct.get("iast", {}) not in [None, {}] + + # There is at least one vulnerability detected + assert len(meta_struct["iast"].get("vulnerabilities", [])) > 0 + + +@features.security_events_metastruct +@scenarios.appsec_meta_struct_disabled +class Test_SecurityEvents_Appsec_Metastruct_Disabled: + """Fallback: Test to verify that Appsec events are set in the json tag when meta struct is not supported by the agent.""" + + def setup_appsec_event_fallback_json(self): + self.r = weblog.get("/", headers={"User-Agent": "Arachni/v1"}) + + def test_appsec_event_fallback_json(self): + spans = [s for _, s in interfaces.library.get_root_spans(request=self.r)] + assert spans + + for span in spans: + meta = span.get("meta", {}) + meta_struct = span.get("meta_struct", {}) + assert meta["appsec.event"] == "true" + assert "_dd.appsec.json" in meta + assert "appsec" not in meta_struct + + # The event is not null + assert meta.get("_dd.appsec.json", {}) not in [None, {}] + + # There is at least one rule triggered + assert len(meta["_dd.appsec.json"].get("triggers", [])) > 0 + + +@features.security_events_metastruct +@scenarios.appsec_meta_struct_disabled +class Test_SecurityEvents_Iast_Metastruct_Disabled: + """Fallback: Test to verify that IAST events are set in the json tag when meta struct is not supported by the agent.""" + + def setup_iast_event_fallback_json(self): + # Triggers a vulnerability + self.r = weblog.get("/set_cookie", params={"name": "metastruct-no", "value": "no"}) + + def test_iast_event_fallback_json(self): + spans = [s for _, s in interfaces.library.get_root_spans(request=self.r)] + assert spans + + for span in spans: + meta = span.get("meta", {}) + meta_struct = span.get("meta_struct", {}) + assert meta["_dd.iast.enabled"] == "1" + assert "_dd.iast.json" in meta + assert
"iast" not in meta_struct + + # The event is not null + assert meta.get("_dd.iast.json", {}) not in [None, {}] + + # There is at least one vulnerability detected + assert len(meta["_dd.iast.json"].get("vulnerabilities", [])) > 0 diff --git a/tests/k8s_lib_injection/__init__.py b/tests/k8s_lib_injection/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/k8s_lib_injection/conftest.py b/tests/k8s_lib_injection/conftest.py index 235e1cf17b..41802b5366 100644 --- a/tests/k8s_lib_injection/conftest.py +++ b/tests/k8s_lib_injection/conftest.py @@ -96,9 +96,9 @@ def deploy_test_agent(self): def deploy_agent(self): self.test_agent.deploy_agent() - def deploy_weblog_as_pod(self, with_admission_controller=True, use_uds=False, env=None): + def deploy_weblog_as_pod(self, with_admission_controller=True, use_uds=False, env=None, service_account=None): if with_admission_controller: - self.test_weblog.install_weblog_pod_with_admission_controller(env=env) + self.test_weblog.install_weblog_pod_with_admission_controller(env=env, service_account=service_account) else: self.test_weblog.install_weblog_pod_without_admission_controller(use_uds, env=env) diff --git a/tests/k8s_lib_injection/test_k8s_djm_with_ssi.py b/tests/k8s_lib_injection/test_k8s_djm_with_ssi.py new file mode 100644 index 0000000000..c16bad6063 --- /dev/null +++ b/tests/k8s_lib_injection/test_k8s_djm_with_ssi.py @@ -0,0 +1,60 @@ +import time + +import requests +import json + +from utils import scenarios, features, context, irrelevant +from utils.tools import logger +from utils.k8s_lib_injection.k8s_command_utils import execute_command_sync + +from tests.k8s_lib_injection.utils import get_dev_agent_traces + + +@features.djm_ssi_k8s +@scenarios.k8s_library_injection_djm +@irrelevant(condition=(context.library != "java"), reason="Data Jobs Monitoring requires Java library only.") +@irrelevant( + condition=(context.weblog_variant != "dd-djm-spark-test-app"), + reason="Data Jobs Monitoring tests are only applicable when using the dd-djm-spark-test-app web-log variant.", +) +class TestK8sDJMWithSSI: + """ This test case validates Java lib injection for Data Jobs Monitoring on k8s. + The tracer is injected using the admission controller via annotations on the submitted Spark application. + We then use the dev test agent to check if the Spark application is instrumented.
+ """ + + def _get_spark_application_traces(self, test_k8s_instance): + traces_json = get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) + logger.debug(f"Traces received: {traces_json}") + return [ + trace + for trace in traces_json + if any(span.get("name") == "spark.application" and span.get("type") == "spark" for span in trace) + ] + + def test_spark_instrumented_with_ssi(self, test_k8s_instance): + logger.info( + f"Launching test test_spark_instrumented_with_ssi: Weblog: [{test_k8s_instance.k8s_kind_cluster.get_weblog_port()}] Agent: [{test_k8s_instance.k8s_kind_cluster.get_agent_port()}]" + ) + + # Create a service account for launching the Spark application in k8s + execute_command_sync("kubectl create serviceaccount spark", test_k8s_instance.k8s_kind_cluster) + execute_command_sync( + "kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=default:spark --namespace=default", + test_k8s_instance.k8s_kind_cluster, + ) + + test_k8s_instance.deploy_test_agent() + test_k8s_instance.deploy_datadog_cluster_agent() + + extra_envs_for_djm_weblog_app = {"LIB_INIT_IMAGE": test_k8s_instance.library_init_image} + test_k8s_instance.deploy_weblog_as_pod(env=extra_envs_for_djm_weblog_app, service_account="spark") + + spark_traces = self._get_spark_application_traces(test_k8s_instance) + + logger.info(f"Spark application traces received: {spark_traces}") + with open(f"{test_k8s_instance.output_folder}/spark_traces.json", "w") as f: + f.write(json.dumps(spark_traces, indent=4)) + assert len(spark_traces) > 0, "No Data Jobs Monitoring Spark application traces found" + + logger.info("Test test_spark_instrumented_with_ssi finished") diff --git a/tests/k8s_lib_injection/test_k8s_manual_inject.py b/tests/k8s_lib_injection/test_k8s_manual_inject.py index 8804134f74..2b4b34b8d1 100644 --- a/tests/k8s_lib_injection/test_k8s_manual_inject.py +++ b/tests/k8s_lib_injection/test_k8s_manual_inject.py @@ -7,7 +7,9 @@ from utils.onboarding.weblog_interface import make_get_request, warmup_weblog from utils.onboarding.backend_interface import wait_backend_trace_id from utils.onboarding.wait_for_tcp_port import wait_for_port -from utils import scenarios, features +from utils import scenarios, features, context, irrelevant + +from tests.k8s_lib_injection.utils import get_dev_agent_traces class _TestAdmisionController: @@ -18,7 +20,7 @@ def test_inject_admission_controller(self, test_k8s_instance): test_k8s_instance.deploy_test_agent() test_k8s_instance.deploy_datadog_cluster_agent() test_k8s_instance.deploy_weblog_as_pod() - traces_json = self._get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) + traces_json = get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) assert len(traces_json) > 0, "No traces found" logger.info(f"Test _test_inject_admission_controller finished") @@ -29,7 +31,7 @@ def test_inject_uds_admission_controller(self, test_k8s_instance): test_k8s_instance.deploy_test_agent() test_k8s_instance.deploy_datadog_cluster_agent(use_uds=True) test_k8s_instance.deploy_weblog_as_pod() - traces_json = self._get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) + traces_json = get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) assert len(traces_json) > 0, "No traces found" logger.info(f"Test test_inject_uds_admission_controller finished") @@ -39,7 +41,7 @@ def test_inject_without_admission_controller(self, test_k8s_instance): ) test_k8s_instance.deploy_test_agent() test_k8s_instance.deploy_weblog_as_pod(with_admission_controller=False) - traces_json =
self._get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) + traces_json = get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) assert len(traces_json) > 0, "No traces found" logger.info(f"Test _test_inject_without_admission_controller finished") @@ -49,23 +51,10 @@ def test_inject_uds_without_admission_controller(self, test_k8s_instance): ) test_k8s_instance.deploy_test_agent() test_k8s_instance.deploy_weblog_as_pod(with_admission_controller=False, use_uds=True) - traces_json = self._get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) + traces_json = get_dev_agent_traces(test_k8s_instance.k8s_kind_cluster) assert len(traces_json) > 0, "No traces found" logger.info(f"Test test_inject_uds_without_admission_controller finished") - def _get_dev_agent_traces(self, k8s_kind_cluster, retry=10): - for _ in range(retry): - logger.info(f"[Check traces] Checking traces:") - response = requests.get( - f"http://{k8s_kind_cluster.cluster_host_name}:{k8s_kind_cluster.get_agent_port()}/test/traces" - ) - traces_json = response.json() - if len(traces_json) > 0: - logger.debug(f"Test traces response: {traces_json}") - return traces_json - time.sleep(2) - return [] - # TODO delete or update this scenario to use test agent # @features.k8s_admission_controller @@ -198,5 +187,9 @@ def backend_trace_validator(trace_id, trace_data): @features.k8s_admission_controller @scenarios.k8s_library_injection_basic +@irrelevant( + condition=(context.weblog_variant == "dd-djm-spark-test-app"), + reason="djm-spark-test-app web-log variant is not supported in this scenario.", +) class TestAdmisionControllerBasic(_TestAdmisionController): pass diff --git a/tests/k8s_lib_injection/utils.py b/tests/k8s_lib_injection/utils.py new file mode 100644 index 0000000000..cb7c7408f6 --- /dev/null +++ b/tests/k8s_lib_injection/utils.py @@ -0,0 +1,19 @@ +import time + +import requests +from utils.tools import logger + + +def get_dev_agent_traces(k8s_kind_cluster, retry=10): + """get_dev_agent_traces fetches traces from the dev agent running in the k8s cluster.""" + for _ in range(retry): + logger.info("[Check traces] Checking traces:") + response = requests.get( + f"http://{k8s_kind_cluster.cluster_host_name}:{k8s_kind_cluster.get_agent_port()}/test/traces" + ) + traces_json = response.json() + if len(traces_json) > 0: + logger.debug(f"Test traces response: {traces_json}") + return traces_json + time.sleep(2) + return [] diff --git a/utils/_context/_scenarios/__init__.py b/utils/_context/_scenarios/__init__.py index 4f5f679f5b..15ef0ac547 100644 --- a/utils/_context/_scenarios/__init__.py +++ b/utils/_context/_scenarios/__init__.py @@ -376,6 +376,14 @@ def all_endtoend_scenarios(test_object): scenario_groups=[ScenarioGroup.APPSEC], ) + appsec_meta_struct_disabled = EndToEndScenario( + "APPSEC_META_STRUCT_DISABLED", + weblog_env={"DD_APPSEC_ENABLED": "true", "DD_IAST_ENABLED": "true"}, + meta_structs_disabled=True, + doc="Appsec tests with support for meta struct disabled in the agent configuration", + scenario_groups=[ScenarioGroup.APPSEC], + ) + remote_config_mocked_backend_asm_features = EndToEndScenario( "REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES", rc_api_enabled=True, @@ -700,6 +708,13 @@ def all_endtoend_scenarios(test_object): scenario_groups=[ScenarioGroup.ALL, ScenarioGroup.LIB_INJECTION], ) + k8s_library_injection_djm = KubernetesScenario( + "K8S_LIBRARY_INJECTION_DJM", + doc="Kubernetes Instrumentation with Data Jobs Monitoring", + github_workflow="libinjection", + scenario_groups=[ScenarioGroup.ALL,
ScenarioGroup.LIB_INJECTION], + ) + k8s_library_injection_profiling = KubernetesScenario( "K8S_LIBRARY_INJECTION_PROFILING", doc=" Kubernetes auto instrumentation, profiling activation", diff --git a/utils/_features.py b/utils/_features.py index f64c8785f5..57ca2c9544 100644 --- a/utils/_features.py +++ b/utils/_features.py @@ -2267,6 +2267,16 @@ def sca_standalone(test_object): pytest.mark.features(feature_id=320)(test_object) return test_object + @staticmethod + def security_events_metastruct(test_object): + """ + Appsec meta struct + + https://feature-parity.us1.prod.dog/#/?feature=314 + """ + pytest.mark.features(feature_id=314)(test_object) + return test_object + @staticmethod def host_auto_installation_script_profiling(test_object): """ @@ -2497,3 +2507,13 @@ def iast_stack_trace(test_object): """ pytest.mark.features(feature_id=329)(test_object) return test_object + + @staticmethod + def djm_ssi_k8s(test_object): + """ + Data Jobs Monitoring: Java lib auto instrumentation for Spark applications on K8s. + + https://feature-parity.us1.prod.dog/#/?feature=342 + """ + pytest.mark.features(feature_id=342)(test_object) + return test_object diff --git a/utils/build/virtual_machine/provisions/auto-inject/auto-inject_init_vm_config.yml b/utils/build/virtual_machine/provisions/auto-inject/auto-inject_init_vm_config.yml index facdd6c239..3588d4650a 100644 --- a/utils/build/virtual_machine/provisions/auto-inject/auto-inject_init_vm_config.yml +++ b/utils/build/virtual_machine/provisions/auto-inject/auto-inject_init_vm_config.yml @@ -29,9 +29,9 @@ lsb_release=/etc/lsb-release must_update_repositories="false" if [ -e "$lsb_release" ]; then - if grep -q 'Ubuntu 21.04' "$lsb_release"; then + if grep -q 'Ubuntu 21' "$lsb_release"; then must_update_repositories="true" - elif grep -q 'Ubuntu 23.10' "$lsb_release"; then + elif grep -q 'Ubuntu 23' "$lsb_release"; then #Why this works for arm machine but not for amd64? must_update_repositories="true" fi diff --git a/utils/k8s_lib_injection/k8s_weblog.py b/utils/k8s_lib_injection/k8s_weblog.py index 946b087c6b..23145a3b60 100644 --- a/utils/k8s_lib_injection/k8s_weblog.py +++ b/utils/k8s_lib_injection/k8s_weblog.py @@ -36,7 +36,7 @@ def configure(self, k8s_kind_cluster, k8s_wrapper): self.k8s_wrapper = k8s_wrapper self.logger = k8s_logger(self.output_folder, self.test_name, "k8s_logger") - def _get_base_weblog_pod(self, env=None): + def _get_base_weblog_pod(self, env=None, service_account=None): """ Installs a target app for manual library injection testing. It returns when the app pod is ready.""" @@ -106,15 +106,15 @@ def _get_base_weblog_pod(self, env=None): containers.append(container1) - pod_spec = client.V1PodSpec(containers=containers) + pod_spec = client.V1PodSpec(containers=containers, service_account=service_account) pod_body = client.V1Pod(api_version="v1", kind="Pod", metadata=pod_metadata, spec=pod_spec) self.logger.info("[Deploy weblog] Weblog pod configuration done.") return pod_body - def install_weblog_pod_with_admission_controller(self, env=None): + def install_weblog_pod_with_admission_controller(self, env=None, service_account=None): self.logger.info("[Deploy weblog] Installing weblog pod using admission controller") - pod_body = self._get_base_weblog_pod(env=env) + pod_body = self._get_base_weblog_pod(env=env, service_account=service_account) self.k8s_wrapper.create_namespaced_pod(body=pod_body) self.logger.info("[Deploy weblog] Weblog pod using admission controller created. 
Waiting for it to be ready!") self.wait_for_weblog_ready_by_label_app("my-app", timeout=200)