diff --git a/.circleci/base_config.yml b/.circleci/base_config.yml index 634a3c29b..02d3c8f53 100644 --- a/.circleci/base_config.yml +++ b/.circleci/base_config.yml @@ -13,11 +13,11 @@ executors: machine-executor: machine: docker_layer_caching: false - image: ubuntu-2204:2024.01.1 + image: ubuntu-2204:2024.05.1 large-machine-executor: machine: docker_layer_caching: false - image: ubuntu-2204:2024.01.1 + image: ubuntu-2204:2024.05.1 resource_class: large parameters: diff --git a/.circleci/build-and-test/commands.yml b/.circleci/build-and-test/commands.yml index 52cfe7149..70ef3f98d 100644 --- a/.circleci/build-and-test/commands.yml +++ b/.circleci/build-and-test/commands.yml @@ -49,6 +49,13 @@ - run: name: Disable npm audit warnings in CI command: npm set audit false - + # This allows us to use the node orb to install packages within other commands install-nodejs-packages: node/install-packages + + docker-login: + steps: + - run: + name: Docker login + command: | + echo "$CIRCI_DOCKER_LOGIN" | docker login https://tdp-docker.dev.raftlabs.tech -u tdp-circi --password-stdin diff --git a/.circleci/build-and-test/jobs.yml b/.circleci/build-and-test/jobs.yml index a40d1568f..469c92250 100644 --- a/.circleci/build-and-test/jobs.yml +++ b/.circleci/build-and-test/jobs.yml @@ -3,6 +3,7 @@ steps: - checkout - docker-compose-check + - docker-login - docker-compose-up-backend - run: name: Run Unit Tests And Create Code Coverage Report @@ -46,6 +47,7 @@ steps: - checkout - docker-compose-check + - docker-login - docker-compose-up-backend - docker-compose-up-frontend - install-nodejs-machine diff --git a/.circleci/deployment/commands.yml b/.circleci/deployment/commands.yml index af907351b..d1aa82b7d 100644 --- a/.circleci/deployment/commands.yml +++ b/.circleci/deployment/commands.yml @@ -1,4 +1,33 @@ # commands: + init-deploy: + steps: + - checkout + - sudo-check + - cf-check + + build-and-tag-images: + parameters: + backend-appname: + default: tdp-backend + type: string + frontend-appname: + default: tdp-frontend + type: string + steps: + - run: + name: Update Docker daemon + command: | + sudo echo '{"max-concurrent-uploads": 1}' | sudo tee /etc/docker/daemon.json + sudo service docker restart + - run: + name: Create builder + command: | + docker buildx create --name container-builder --driver docker-container --use --bootstrap + - run: + name: Build and tag images + command: | + ./scripts/build-and-tag-images.sh <> <> ./tdrs-backend ./tdrs-frontend $CIRCLE_BUILD_NUM $CIRCLE_SHA1 "$CIRCI_DOCKER_LOGIN" tdp-circi + deploy-cloud-dot-gov: parameters: environment: @@ -25,9 +54,6 @@ default: tdp-frontend type: string steps: - - checkout - - sudo-check - - cf-check - login-cloud-dot-gov: cf-password: <> cf-org: <> diff --git a/.circleci/deployment/jobs.yml b/.circleci/deployment/jobs.yml index 63d5bc070..ce163101f 100644 --- a/.circleci/deployment/jobs.yml +++ b/.circleci/deployment/jobs.yml @@ -1,3 +1,33 @@ + build-and-tag-develop: + executor: large-machine-executor + working_directory: ~/tdp-deploy + steps: + - checkout + - sudo-check + - build-and-tag-images: + backend-appname: tdp-backend-develop + frontend-appname: tdp-frontend-develop + + build-and-tag-staging: + executor: large-machine-executor + working_directory: ~/tdp-deploy + steps: + - checkout + - sudo-check + - build-and-tag-images: + backend-appname: tdp-backend-staging + frontend-appname: tdp-frontend-staging + + build-and-tag-production: + executor: large-machine-executor + working_directory: ~/tdp-deploy + steps: + - checkout + - sudo-check 
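+      # build-and-tag-images (defined in deployment/commands.yml) builds the amd64 and
+      # arm64 images and pushes them to Nexus, tagged with $CIRCLE_BUILD_NUM and $CIRCLE_SHA1.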
+ - build-and-tag-images: + backend-appname: tdp-backend-production + frontend-appname: tdp-frontend-production + deploy-dev: parameters: target_env: @@ -5,6 +35,7 @@ executor: docker-executor working_directory: ~/tdp-deploy steps: + - init-deploy - deploy-cloud-dot-gov: backend-appname: tdp-backend-<< parameters.target_env >> frontend-appname: tdp-frontend-<< parameters.target_env >> @@ -13,6 +44,7 @@ executor: docker-executor working_directory: ~/tdp-deploy steps: + - init-deploy - deploy-cloud-dot-gov: backend-appname: tdp-backend-staging frontend-appname: tdp-frontend-staging @@ -24,6 +56,7 @@ executor: docker-executor working_directory: ~/tdp-deploy steps: + - init-deploy - deploy-cloud-dot-gov: backend-appname: tdp-backend-develop frontend-appname: tdp-frontend-develop @@ -133,6 +166,7 @@ executor: docker-executor working_directory: ~/tdp-deploy steps: + - init-deploy - deploy-cloud-dot-gov: environment: production backend-appname: tdp-backend-prod diff --git a/.circleci/deployment/workflows.yml b/.circleci/deployment/workflows.yml index 8a4269c04..a0de09f9e 100644 --- a/.circleci/deployment/workflows.yml +++ b/.circleci/deployment/workflows.yml @@ -93,27 +93,48 @@ - develop - main - master - - deploy-develop: + - build-and-tag-develop: requires: - deploy-infrastructure-staging filters: branches: only: - develop - - deploy-staging: + - deploy-develop: + requires: + - build-and-tag-develop + filters: + branches: + only: + - develop + - build-and-tag-staging: requires: - deploy-infrastructure-staging filters: branches: only: - main - - deploy-production: + - deploy-staging: + requires: + - build-and-tag-staging + filters: + branches: + only: + - main + - build-and-tag-production: requires: - deploy-infrastructure-production filters: branches: only: - master + - deploy-production: + requires: + - build-and-tag-production + filters: + branches: + only: + - master - test-deployment-e2e: requires: - deploy-develop diff --git a/.circleci/generate_config.sh b/.circleci/generate_config.sh old mode 100644 new mode 100755 diff --git a/.circleci/owasp/jobs.yml b/.circleci/owasp/jobs.yml index 225758ef5..fdabb0a22 100644 --- a/.circleci/owasp/jobs.yml +++ b/.circleci/owasp/jobs.yml @@ -4,6 +4,7 @@ steps: - checkout - docker-compose-check + - docker-login - docker-compose-up-backend - docker-compose-up-frontend - run: @@ -26,6 +27,7 @@ steps: - checkout - docker-compose-check + - docker-login - docker-compose-up-backend - docker-compose-up-frontend - run: @@ -66,6 +68,7 @@ - sudo-check - cf-check - docker-compose-check + - docker-login - login-cloud-dot-gov: cf-password: <> cf-space: <> diff --git a/.github/ISSUE_TEMPLATE/dev-issue-template.md b/.github/ISSUE_TEMPLATE/dev-issue-template.md index 530a6db49..8857901a5 100644 --- a/.github/ISSUE_TEMPLATE/dev-issue-template.md +++ b/.github/ISSUE_TEMPLATE/dev-issue-template.md @@ -9,6 +9,7 @@ assignees: '' **Description:** _Provide a brief background and justification for this issue_ +_OFA & UX team: Please include bullets capturing the value to STT end users, related guidance, and/or talking points that should be communicated to STTs in release notes_ **Acceptance Criteria:** diff --git a/.gitignore b/.gitignore index 2fee3eca0..6be3a5017 100644 --- a/.gitignore +++ b/.gitignore @@ -108,4 +108,5 @@ tfapply cypress.env.json # Patches -*.patch \ No newline at end of file +*.patch +tdrs-backend/*.pg diff --git a/Taskfile.yml b/Taskfile.yml index 2c67784b9..8f1731fe9 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -2,6 +2,11 @@ version: '3' tasks: + 
upload-kibana-objs: + desc: Upload dashboards to Kibana server + cmds: + - curl -X POST localhost:5601/api/saved_objects/_import -H "kbn-xsrf: true" --form file=@tdrs-backend/tdpservice/search_indexes/kibana_saved_objs.ndjson + create-network: desc: Create the external network cmds: @@ -29,7 +34,7 @@ tasks: desc: Create Sentry service dir: sentry cmds: - # limiting the memory to 2GB and CPU to only one cpu @0, for faster response, you can remove the limittask : --cpuset-cpus 0 + # limiting the memory to 2GB and CPU to only one cpu @0, for faster response, you can remove the limittask : --cpuset-cpus 0 - (docker run --privileged -p 9001:9000 -d --memory="8g" --memory-swap="8g" --name sentry docker:dind) || true - docker exec sentry sh -c "git clone https://github.com/getsentry/self-hosted.git || true" @@ -109,6 +114,23 @@ tasks: cmds: - docker-compose -f docker-compose.yml exec web sh -c "python ./manage.py shell" + backend-exec: + desc: Execute a command in the backend container + dir: tdrs-backend + vars: + CMD: '{{.CMD}}' + cmds: + - docker-compose -f docker-compose.yml exec web sh -c "python manage.py {{.CMD}}" + + backend-exec-seed-db: + desc: Execute seed_db command in the backend container + dir: tdrs-backend + vars: + CMD: '{{.CMD}}' + cmds: + - docker-compose -f docker-compose.yml up -d + - docker-compose -f docker-compose.yml exec web sh -c "python manage.py populate_stts; python ./manage.py seed_db" + backend-pytest: desc: 'Run pytest in the backend container E.g: task backend-pytest PYTEST_ARGS="tdpservice/test/ -s -vv"' dir: tdrs-backend @@ -155,7 +177,7 @@ tasks: - docker rm $(docker ps -aq) || true - docker rmi $(docker images -q) || true - docker volume rm $(docker volume ls -q) || true - + clamav-up: desc: Start clamav service dir: tdrs-backend @@ -187,7 +209,7 @@ tasks: - task: frontend-up - task: clamav-up - + # need more work frontend-init: desc: Initialize the frontend project diff --git a/docs/How-We-Work/team-meetings.md b/docs/How-We-Work/team-meetings.md index 8b78dcc59..ff770ed7a 100644 --- a/docs/How-We-Work/team-meetings.md +++ b/docs/How-We-Work/team-meetings.md @@ -68,12 +68,12 @@ A typical sprint schedule is described in the table below. - External factors outside of the author spending time building the ticket (ie need external team's input, see how a feature develops, etc.) - Ex. Waiting on X ticket to finish the scope of said unrefined ticket, problem found / unsure how big it is and knows other work will unearth it - If we know the ACs but not the tasks, then its unrefined + - Release Notes summary is empty or incomplete as applicable, to be provided by UX/Product by default. - Refined: Ticket is complete and is ready to be executed. - Refined & Ready to Go (Next Sprint) - "Earmarked" work for the upcoming sprint. - **Labelling:** - WIP - - Author knows the 5 W's or darn near (90%) - Drafted ticket – either still on the author the finish their part or a short team conversation is needed. - Administrative in nature - Ex. 
Stub, ticket that doesn't feel there's enough to warrant an introduction diff --git a/docs/Sprint-Review/sprint-107-summary.md b/docs/Sprint-Review/sprint-107-summary.md new file mode 100644 index 000000000..e6d1aa4d9 --- /dev/null +++ b/docs/Sprint-Review/sprint-107-summary.md @@ -0,0 +1,89 @@ +# sprint-107-summary + +8/28/2024 - 9/10/2024 + +### Priority Setting + +* Re-parsing epic +* Postgres db access +* UX research with DIGIT team +* Continuous communication with STTs about latest TDP features and updates + +### Sprint Goal + +**Dev:** + +_**Re-parsing, Admin Console Improvements, and Application Health Monitoring work**_ + +* \#3106 — Re-Parse Django Action +* \#3137 — \[bug] OFA unable to export data to csv by record type and fiscal period +* \#3074 — TDP Data Files page permissions for DIGIT & Sys Admin user groups +* \#3044 — Prometheus/Grafana - Local Environment +* \#3042 — Sentry in cloud.gov + +**DevOps:** +_**Successful deployments across environments and pipeline stability investments**_ + +* \#2965 — As tech lead, I want a database seed implemented for testing +* \#2458 — Integrate Nexus into CircleCI + +**Design:** + +_**Support reviews, In-app banner to support parsed data, Continue Error Audit (Cat 4)**_ + +* \#3156 — Release Notes Email Template +* \#3100 — \[Design Deliverable] Update stakeholders & personas document +* \#2968 — \[Design Deliverable] Update Error Audit for Cat 4 / QA + +## Tickets + +### Completed/Merged + +* [#2561 As a sys admin, I need TDP to automatically deactivate accounts that are inactive for 180 days](https://github.com/raft-tech/TANF-app/issues/2561) +* [#2792 \[Error Audit\] Category 3 error messages clean-up ](https://github.com/raft-tech/TANF-app/issues/2792) +* [#3043 Sentry: Local environment for Debugging](https://github.com/raft-tech/TANF-app/issues/3043) +* [#3064 Re-parse Meta Model](https://github.com/raft-tech/TANF-app/issues/3064) +* [#3065 Spike - Guarantee Sequential Execution of Re-parse Command](https://github.com/raft-tech/TANF-app/issues/3065) +* [#3074 TDP Data Files page permissions for DIGIT & Sys Admin user groups ](https://github.com/raft-tech/TANF-app/issues/3074) +* [#3076 Admin Filter Enhancements for Data Files Page ](https://github.com/raft-tech/TANF-app/issues/3076) +* [#3078 \[Research Synthesis\] DIGIT Admin Experience Improvements ](https://github.com/raft-tech/TANF-app/issues/3078) +* [#3087 Admin By Newest Filter Enhancements for Data Files Page ](https://github.com/raft-tech/TANF-app/issues/3087) +* [#3114 \[Design Spike\] In-app banner for submission history pages w/ data parsed before May 2024 ](https://github.com/raft-tech/TANF-app/issues/3114) +* [#3142 \[Research Spike\] Get more detail about Yun & DIGIT's data workflow and use cases ](https://github.com/raft-tech/TANF-app/issues/3142) + +### Submitted (QASP Review, OCIO Review) + +* + +### Ready to Merge + +* [#2883 Pre-Made Reporting Dashboards on Kibana ](https://github.com/raft-tech/TANF-app/issues/2883) +* [#3102 Admin Exp: Django Implement Multi-Select Fiscal Period Dropdown For Data Export ](https://github.com/raft-tech/TANF-app/issues/3102) + +### Closed (Not Merged) + +* [#3110 Spike - Investigate Custom Filter Integration ](https://github.com/raft-tech/TANF-app/issues/3110) +* [#3156 Release Notes Knowledge Center and Email Template ](https://github.com/raft-tech/TANF-app/issues/3156) + +### Moved to Next Sprint + +**In Progress** + +* [#2968 \[Design Deliverable\] Update Error Audit for Cat 4 / QA 
](https://github.com/raft-tech/TANF-app/issues/2968) +* [#3060 As a TDP user, I need to stay logged in when I'm actively using the system ](https://github.com/raft-tech/TANF-app/issues/3060) +* [#3100 \[Design Deliverable\] Update stakeholders & personas document ](https://github.com/raft-tech/TANF-app/issues/3100) +* [#3106 Re-Parse Django Action ](https://github.com/raft-tech/TANF-app/issues/3106) +* [#3137 \[bug\] OFA unable to export data to csv by record type and fiscal period ](https://github.com/raft-tech/TANF-app/issues/3137) +* [#3164 \[Research Synthesis\] Yun & DIGIT's data workflow and use cases ](https://github.com/raft-tech/TANF-app/issues/3164) +* [#3170 Reparse Command Fails when Queryset is Large ](https://github.com/raft-tech/TANF-app/issues/3170) +* [#3179 Spike - How We Work / Hopes & Fears Workshop prep ](https://github.com/raft-tech/TANF-app/issues/3179) + +**Blocked** + +* + +**Raft Review** + +* [#2458 Integrate Nexus into CircleCI ](https://github.com/raft-tech/TANF-app/issues/2458) +* [#2965 As tech lead, I want a database seed implemented for testing ](https://github.com/raft-tech/TANF-app/issues/2965) +* [#3044 Prometheus/Grafana - Local Environment ](https://github.com/raft-tech/TANF-app/issues/3044) diff --git a/docs/Technical-Documentation/images/nexus-dev-admin-login.png b/docs/Technical-Documentation/images/nexus-dev-admin-login.png new file mode 100644 index 000000000..d3b00e903 Binary files /dev/null and b/docs/Technical-Documentation/images/nexus-dev-admin-login.png differ diff --git a/docs/Technical-Documentation/nexus-repo.md b/docs/Technical-Documentation/nexus-repo.md index 6f4a15bf5..5e504a384 100644 --- a/docs/Technical-Documentation/nexus-repo.md +++ b/docs/Technical-Documentation/nexus-repo.md @@ -40,7 +40,7 @@ After logging in as root for the first time, you will be taken to a page to set In order to use Nexus as a Docker repository, the DNS for the repo needs to be able to terminate https. We are currently using cloudflare to do this. -When creating the repository (must be signed in with admin privileges), since the nexus server isn't actually terminating the https, select the HTTP repository connector. The port can be anything you assign, as long as the tool used to terminate the https connection forwards the traffic to that port. +When creating the repository (must be signed in with admin privileges), since the nexus server isn't actually terminating the https, select the HTTP repository connector. The port can be anything you assign, as long as the tool used to terminate the https connection forwards the traffic to that port. In order to allow [Docker client login and connections](https://help.sonatype.com/repomanager3/nexus-repository-administration/formats/docker-registry/docker-authentication) you must set up the Docker Bearer Token Realm in Settings -> Security -> Realms -> and move the Docker Bearer Token Realm over to Active. Also, any users will need nx-repository-view-docker-#{RepoName}-(browse && read) at a minimum and (add and edit) in order to push images. @@ -48,21 +48,86 @@ Also, any users will need nx-repository-view-docker-#{RepoName}-(browse && read) We have a separate endpoint to connect specifically to the docker repository. [https://tdp-docker.dev.raftlabs.tech](tdp-docker.dev.raftlabs.tech) -e.g. `docker login https://tdp-docker.dev.raftlabs.tech` +e.g. 
+```
+docker login https://tdp-docker.dev.raftlabs.tech
+```

### Pushing Images

Before an image can be pushed to the nexus repository, it must be tagged for that repo:

-`docker image tag ${ImageId} tdp-docker.dev.raftlabs.tech/${ImageName}:${Version}`
+```
+docker image tag ${ImageId} tdp-docker.dev.raftlabs.tech/${ImageName}:${Version}
+```

then you can push:

-`docker push tdp-docker.dev.raftlabs.tech/${ImageName}:${Version}`
+```
+docker push tdp-docker.dev.raftlabs.tech/${ImageName}:${Version}
+```

### Pulling Images

-We have set up a proxy mirror to dockerhub that can pull and cache DockerHub images.
-Then we have created a group docker repository that can be pulled from. If the container is in our hosted repo, the group will return that container. If not, it will see if we have a cached version of that container in our proxy repo and, if not, pull that from dockerhub, cache it and allow the docker pull to happen.
+We do not allow anonymous access on our Nexus instance. That means if you have not [logged in with Docker](#docker-login), you will not be able to pull. If you are logged in:
+
+```
+docker pull tdp-docker.dev.raftlabs.tech/${ImageName}:${Version}
+```
+
+## Nexus Administration
+
+### UI Admin Login
+To administer Nexus via the UI, you will need to access the service key in our dev cloud.gov environment.
+
+Log in with CloudFoundry:
+```
+cf login --sso
+```
+Be sure to specify the space as `tanf-dev`.
-`docker pull https://tdp-docker-store.dev.raftlabs.tech/${ImageName}:${Version}`
\ No newline at end of file
+After you've authenticated, you can grab the password from the key:
+```
+cf service-key tanf-keys nexus-dev-admin
+```
+
+The key returns a username and a password:
+```
+{
+  "credentials": {
+    "password": REDACTED,
+    "username": REDACTED
+  }
+}
+```
+Copy the `password` to your clipboard and log in to the Nexus UI with the `tdp-dev-admin` user. See below:
+
+![Nexus Dev Admin Login](./images/nexus-dev-admin-login.png)
+
+### VM Login
+To access the VM running Nexus, you will need access to the Raft internal network. To do this, install CloudFlare's WARP zero trust VPN; follow the instructions [here](https://gorafttech-my.sharepoint.com/:w:/g/personal/tradin_teamraft_com/EZePOTv0dbdBguHITcoXQF0Bd5JAcqeLsJTlEOktTfIXHA?e=34WqB4) to get set up. From there, reach out to Eric Lipe or Connor Meehan for the IP, username, and password to access the VM. Once you have the credentials, you can log in with SSH:
+```
+ssh username@IP_Address
+```
+
+Once logged in, you can run `docker ps` or other docker commands to view and administer the Nexus container as necessary. You should also consider generating an SSH key to avoid having to enter the password each time you log in. To do so, run the following commands on your local machine:
+```
+ssh-keygen
+```
+
+```
+ssh-copy-id username@IP_Address
+```
+Now you will no longer have to enter the password when logging in.
+
+## Local Docker Login
+After logging into the `tanf-dev` space with the `cf` cli, execute the following commands to authenticate your local docker daemon:
+```
+export NEXUS_DOCKER_PASSWORD=`cf service-key tanf-keys nexus-dev | tail -n +2 | jq -r .credentials.password`
+echo "$NEXUS_DOCKER_PASSWORD" | docker login https://tdp-docker.dev.raftlabs.tech -u tdp-dev --password-stdin
+```
+
+Sometimes the `docker login...` command above doesn't work.
If that happens, just copy the content of `NEXUS_DOCKER_PASSWORD` to your clipboard and paste it when prompted for the password after executing the command below. +``` +docker login https://tdp-docker.dev.raftlabs.tech -u tdp-dev +``` diff --git a/docs/Technical-Documentation/tech-memos/multi-select-fiters/multi-select-filters.md b/docs/Technical-Documentation/tech-memos/multi-select-fiters/multi-select-filters.md new file mode 100644 index 000000000..93d668bd6 --- /dev/null +++ b/docs/Technical-Documentation/tech-memos/multi-select-fiters/multi-select-filters.md @@ -0,0 +1,171 @@ +# Multi-Select Filters + +**Audience**: TDP Software Engineers
+**Subject**: Multi-Select Filter Integration
+**Date**: August 27, 2024
+
+## Summary
+This technical memorandum provides suggested guidelines for a future engineer to integrate TDP's multi-select filtering needs with Django 508. The memorandum provides the necessary background on both the TDP multi-select filters and Django 508, including its purpose and effects. The [Method](#method) section provides the guidelines and updates required to integrate TDP's custom filtering needs with Django 508. Specifically, the [Django 508 Updates](#django-508-updates) section introduces the engineer to the area of Django 508 where filtering and query string building occur, along with the suggested changes. The [TDP Updates](#tdp-updates) section introduces the recommended changes to TDP's current custom filtering: how it can be simplified and how it could be unified with Django 508 to provide a seamless filtering experience.
+
+## Background
+TDP has been expanding its Django Admin Console (DAC) filtering capabilities by introducing custom filters, specifically multi-select filters. This has introduced a myriad of issues because TDP does not use the default DAC. Instead, to assist with accessibility compliance, TDP wraps the default DAC with [Django 508](https://github.com/raft-tech/django-admin-508) (henceforth referred to as 508), which makes various updates to the styling and functionality of the default DAC. A key change 508 introduces to the DAC is an `Apply Filters` button that intercepts query string parameters from default DAC filters and only applies them after the button is clicked. The default DAC applies filters as they are selected, as opposed to all at once. The issue with 508's approach is that it assumes all filters are built-in Django filters (i.e. single-select filters). This presents a discrepancy because Django allows developers to write custom templates and filters to add further filtering functionality (e.g. multi-select filters).
+
+## Out of Scope
+General filter template specification and the general property-based multi-select filtering mentioned in the [TDP Updates](#tdp-updates) section are out of scope for this memorandum.
+
+## Method
+To support multi-select/custom filtering in the DAC, both the TDP repository and the 508 repository will require updates.
+
+### Django 508 Updates
+508 builds the query string for all filters on the currently selected DAC page with the [dropdown-filter.js](https://github.com/raft-tech/django-admin-508/blob/main/admin_interface/static/admin_interface/508/dropdown-filter.js) JavaScript file. This file defines a jQuery function that operates on the `changelist-filter` element in the DOM. The function adds `onchange` event handlers to each filter in the `changelist-filter` element; when a filter changes, the handler extracts the filter's query string template value and constructs a new query string from it. However, once custom templates and custom filters are introduced, this jQuery function breaks down. The query building for single-select and multi-select filters cannot be unified: Django's built-in single-select filters define a single prop on each of the filter's `option` elements, where the `value` prop is that option's query parameter with the rest of the current query string appended to it. The same approach cannot work for multi-select filters because the query string cannot (and should not) contain multiple queries of the same type.
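+
+For illustration, consider a hypothetical built-in `status` filter alongside a multi-select `stt` filter (field names here are examples only). The built-in filter's selected `option` already carries a complete query string in its `value` prop, while a multi-select filter must contribute one `key=value` pair assembled from every selected option:
+
+```
+# built-in single-select: the option's `value` prop is a pre-built query string
+?status__exact=Accepted&stt__in=1%2C2
+
+# multi-select: 508 must join the selected values under a single key
+?stt__in=1%2C2%2C5
+```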
+
+This implies that single-select and multi-select filters have to be handled differently in 508. The update to `dropdown-filter.js` provided below serves as a guide toward a final solution for integrating multi-select filters, single-select filters, and the `Apply Filters` button. The implementation relies on two key facts: one, all multi-select filters define `ariaMultiSelectable`; and two, all multi-select filters define two custom props, `key` and `value`. These key-value pairs (e.g. `key=name__in`, `value=Bob`) are used to build the query string along with whatever the remaining single-select filters have chosen. When a user clicks the `Apply Filters` button, the code below executes and builds the query string for single- and multi-select filters.
+
+```javascript
+if (typeof (django) !== 'undefined' && typeof (django.jQuery) !== 'undefined') {
+  (function ($) {
+    'use strict';
+    $(document).ready(function () {
+      const filters = document.querySelectorAll('#changelist-filter .list-filter-dropdown select')
+      let query = '?'
+
+      const applyFiltersButton = document.querySelector('#submit-filters');
+      if (applyFiltersButton) {
+        applyFiltersButton.onclick = function () {
+          for (const filter of filters) {
+            let conjunction = query === '?' ? '' : '&'
+            if (!filter.ariaMultiSelectable) {
+              if (filter.selectedIndex !== 0) {
+                // Built-in Django filters append the query string to the `value` field on the element. However, when we
+                // have a multi-selectable filter, the select element can't have the `value` field as a query string
+                // because multiple options can be selected and there is no way to track that. Therefore, we strip the
+                // single-select filter's query param from the existing query string and build an entirely new query
+                // string from that.
+                let opt = filter.options[filter.selectedIndex]
+                let query_str = opt.value
+                let filter_query = ''
+                for (let i = 1; i < query_str.length; i++) {
+                  if (query_str[i] === '&') {
+                    break
+                  }
+                  filter_query += query_str[i]
+                }
+                query = query.concat(conjunction, filter_query)
+              }
+            }
+            else {
+              // All multi-select filters are required to set the `key` and `value` fields on the option element for
+              // each individual option to be able to build the correct query string.
+              let selected = ''
+              for (const option of filter.options) {
+                if (option.selected) {
+                  selected = selected.concat(option.value, '%2C')
+                }
+              }
+              selected = selected.substring(0, selected.lastIndexOf('%2C'))
+              if (selected !== '') {
+                query = query.concat(conjunction, filter.options[0].getAttribute('key'), '=', selected)
+              }
+            }
+          }
+          window.location = query
+        };
+      }
+    });
+  })(django.jQuery);
+}
+```
+
+### TDP Updates
+Currently, TDP implements a custom multi-select filter with a custom template. This filter is complex and relies on a custom "filter" button to apply its selection, which clashes with the `Apply Filters` button that 508 introduces. To remedy this, the current and future multi-select/custom filters implemented in TDP need to give 508 control of constructing the appropriate query string by supplying 508, via their templates, with the key-value pairs it needs. In doing so, we can also simplify and generalize the current multi-select filter available in TDP.
+
+TDP currently utilizes three classes to implement field-based multi-select filtering. This should be reducible to a single class once we let 508 manage query string building. There are a few main features to note that this type of class would need.
+
+The first is the custom template used to create a multi-select dropdown filter; an example is [multiselectdropdownfilter.html](multiselectdropdownfilter.html). The second is the set of unique query string parameters that need to be defined in the class's `choices` method, e.g. the `key` and `value` parameters. These are the parameters that Django populates into the aforementioned template and that 508 will need to parse to build the appropriate query string. Below is an example class that allows for field-based multi-select filtering. It introduces some extra caveats to handle multi-select functionality: `FieldListMultiSelectFilter` overrides and adds new parameters in its constructor before calling `super()`, and these additions and overrides help convert the parent class `AllValuesFieldListFilter` from a single-select to a multi-select filter. Looking toward the future of TDP filtering, the example class also provides a path forward for Django model property-based multi-select filtering (e.g. the `fiscal_period` property on the `DataFile` model): leveraging the aforementioned template and building a class which subclasses Django's `SimpleListFilter`, we can provide general multi-select filtering for Django model properties by implementing the correct `queryset`, `choices`, and `lookups` methods.
+
+```python
+import urllib.parse
+
+from django.contrib import admin
+from django.contrib.admin.options import IncorrectLookupParameters
+from django.contrib.admin.utils import reverse_field_path
+from django.core.exceptions import ValidationError
+from django.db.models import Q
+from django.utils.translation import gettext_lazy as _
+
+
+class FieldListMultiSelectFilter(admin.AllValuesFieldListFilter):
+    """Multi-select dropdown filter for all kinds of fields."""
+
+    template = 'multiselectdropdownfilter.html'
+
+    def __init__(self, field, request, params, model, model_admin, field_path):
+        self.lookup_kwarg = '%s__in' % field_path
+        self.lookup_kwarg_isnull = '%s__isnull' % field_path
+        lookup_vals = request.GET.get(self.lookup_kwarg)
+        self.lookup_vals = lookup_vals.split(',') if lookup_vals else list()
+        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
+        self.empty_value_display = model_admin.get_empty_value_display()
+        parent_model, reverse_path = reverse_field_path(model, field_path)
+        # Obey parent ModelAdmin queryset when deciding which options to show
+        if model == parent_model:
+            queryset = model_admin.get_queryset(request)
+        else:
+            queryset = parent_model._default_manager.all()
+        self.lookup_choices = (queryset
+                               .distinct()
+                               .order_by(field.name)
+                               .values_list(field.name, flat=True))
+        super(admin.AllValuesFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
+
+    def queryset(self, request, queryset):
+        """Build queryset based on choices."""
+        params = Q()
+        for lookup_arg, value in self.used_parameters.items():
+            params |= Q(**{lookup_arg: value})
+        try:
+            return queryset.filter(params)
+        except (ValueError, ValidationError) as e:
+            # Fields may raise a ValueError or ValidationError when converting
+            # the parameters to the correct type.
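+            # Re-raising as IncorrectLookupParameters lets the admin changelist
+            # report an invalid-filter error instead of crashing with a 500.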
+            raise IncorrectLookupParameters(e)
+
+    def prepare_querystring_value(self, value):
+        """Mask commas."""
+        return str(value).replace(',', '%~')
+
+    def choices(self, changelist):
+        """Generate choices."""
+        add_facets = getattr(changelist, "add_facets", False)
+        facet_counts = self.get_facet_queryset(changelist) if add_facets else None
+        query_string = changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull])
+        yield {
+            'selected': not self.lookup_vals and self.lookup_val_isnull is None,
+            'query_string': query_string,
+            'display': _('All'),
+        }
+        include_none = False
+        count = None
+        empty_title = self.empty_value_display
+        for i, val in enumerate(self.lookup_choices):
+            if add_facets:
+                count = facet_counts[f"{i}__c"]
+            if val is None:
+                include_none = True
+                empty_title = f"{empty_title} ({count})" if add_facets else empty_title
+                continue
+
+            val = str(val)
+            qval = self.prepare_querystring_value(val)
+            yield {
+                'selected': qval in self.lookup_vals,
+                'query_string': query_string,
+                'display': f"{val} ({count})" if add_facets else val,
+                'value': urllib.parse.quote_plus(val),
+                'key': self.lookup_kwarg,
+            }
+        if include_none:
+            yield {
+                'selected': bool(self.lookup_val_isnull),
+                'query_string': query_string,
+                'display': empty_title,
+                'value': 'True',
+                'key': self.lookup_kwarg_isnull,
+            }
+```
+
+## Affected Systems
+- Django 508
+- TANF-App
+
+## Use and Test cases to consider
+- Consider adding 508 integration tests for all/most Django fields for the suggested `FieldListMultiSelectFilter`
+- Test having multiple Django built-in filters and `FieldListMultiSelectFilter`s on the same page and verify the query string
+
diff --git a/docs/Technical-Documentation/tech-memos/multi-select-fiters/multiselectdropdownfilter.html b/docs/Technical-Documentation/tech-memos/multi-select-fiters/multiselectdropdownfilter.html
new file mode 100644
index 000000000..4cec46c77
--- /dev/null
+++ b/docs/Technical-Documentation/tech-memos/multi-select-fiters/multiselectdropdownfilter.html
@@ -0,0 +1,18 @@
+{% load i18n admin_urls %}
+
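+{# Each option rendered by this template must expose custom `key` and `value` props so 508's dropdown-filter.js can assemble the query string (see multi-select-filters.md). #}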
+

{% blocktrans with filter_title=title %} By {{ filter_title }} {% endblocktrans %}

+ +
+ +
+
\ No newline at end of file diff --git a/docs/Technical-Documentation/tech-memos/tm-template.md b/docs/Technical-Documentation/tech-memos/tm-template.md new file mode 100644 index 000000000..0921d3888 --- /dev/null +++ b/docs/Technical-Documentation/tech-memos/tm-template.md @@ -0,0 +1,26 @@ +# TITLE + +**Audience**: TDP Software Engineers
+**Subject**: SUBJECT/TITLE
+**Date**: August 8, 2024
+ +## Summary +This is a template to use to create new technical memorandums. + +## Background (Optional) +Background for the feature if necessary. + +## Out of Scope +Call out what is out of scope for this technical memorandum and should be considered in a different technical memorandum. + +## Method/Design +This section should contain sub sections that provide general implementation details surrounding key components required to implement the feature. + +### Sub header (piece of the design, can be many of these) +sub header content describing component. + +## Affected Systems +provide a list of systems this feature will depend on/change. + +## Use and Test cases to consider +provide a list of use cases and test cases to be considered when the feature is being implemented. diff --git a/product-updates/knowledge-center/index.html b/product-updates/knowledge-center/index.html index a31984e6e..770593eb1 100644 --- a/product-updates/knowledge-center/index.html +++ b/product-updates/knowledge-center/index.html @@ -335,13 +335,72 @@

Use an existing Login.gov Account
- +

What's new in TDP

+

September 10th 2024 (v 3.6.0)

+

In Development:

+
    + +
  • +
    + +
    +
    +
    + File Status and Error Messaging +
    +

    + We've received some requests for clarification around file submission status and + error messages. In response, we're working to improve guidance and the user experience to better meet partners' needs. Additionally, the TDP team is planning more opportunities to connect with partners to discuss the TDP error report including terminology, what errors mean, etc. Please stay tuned.

    +
    +
  • +
-

What's new in TDP

+

Added:

+
    +
  • +
    + +
    +
    +
    + Additional Context +
    +

    Error messages regarding values across related records now include the associated item number(s), item name(s), and row number to help you identify records that have been detected as having data quality discrepancies.

    +
    +
  • +
+

Changed / Fixed:

+
    +
  • +
    + +
    +
    +
    + Error Message Language Cleanup +
    +

    TDP performs validation checks across related items in a record and will generate error messages capturing instances where inconsistent values within the record were detected.

    +

    Previous versions of these types of error messages would return the following (for example): + If Cash Amount :873 validator1 passed then Item 21B (Cash and Cash Equivalents: Number of Months) 0 is not larger than 0. +

    +

    Now, this type of error message will read: + Since Item 12 (Cash Amount) is 100, then Item 21B (Cash and Cash Equivalents: Number of Months) 0 must be larger than 0. +

    +
    +
  • +
+ + +
-

August 15th 2024 (v 3.5.2)

+ +

August 15th 2024 (v 3.5.2)

Added:

  • @@ -404,8 +463,6 @@
  • - -
  • diff --git a/product-updates/knowledge-center/view-submission-history.html b/product-updates/knowledge-center/view-submission-history.html index a825f9142..ac9869cf3 100644 --- a/product-updates/knowledge-center/view-submission-history.html +++ b/product-updates/knowledge-center/view-submission-history.html @@ -360,7 +360,7 @@

    Download Error Reports and Previously Submitted Data Files

    -

    Understanding File Statuses and Aggregate Data

    +

    Understanding File Statuses and Aggregate Data

    The Submission History table includes a status which communicates the level of completeness of each file and related aggregate data about its contents including counts of cases with and without errors, counts of records unable to process within a file, and (for section 3 and 4 data) a count of total errors in the file.

    diff --git a/product-updates/knowledge-center/viewing-error-reports.html b/product-updates/knowledge-center/viewing-error-reports.html index 6024ff3bc..a72931f3b 100644 --- a/product-updates/knowledge-center/viewing-error-reports.html +++ b/product-updates/knowledge-center/viewing-error-reports.html @@ -632,7 +632,7 @@

    Errors related to invalid values for a specific item/data element:

    -

    Errors related to inconsistent values for related items/data elements in the same record:

    +

    Errors related to inconsistent values for related items/data elements in the same record:

    Some errors may require review of the coding instructions for multiple items (and their respective values) to determine the proper correction. In the example below, the error is communicating that the value reported for Item 49 is in a conflict with the value for Item 30 in the same record. This message suggests a problem with either the value of Item 49 or the value of Item 30. Refer to the coding instructions and your own data to determine which value needs to be corrected.

    @@ -644,7 +644,7 @@

    Errors related to inconsistent values for related items/data elements in the
    -

    Errors related to inconsistent values across related records:

    +

    Errors with inconsistent values across related records may require review of the coding instructions to determine the proper correction. In the example below, the error is communicating that a T1 (family) record was found in the file that did not have a corresponding T2 (adult) or T3 (child) record, which effectively means that person-level records associated with this family are missing from the file.

    diff --git a/scripts/apply-remote-migrations.sh b/scripts/apply-remote-migrations.sh index 897f4d04f..a067109bf 100644 --- a/scripts/apply-remote-migrations.sh +++ b/scripts/apply-remote-migrations.sh @@ -5,7 +5,7 @@ app=${1} cd ./tdrs-backend echo "Install dependencies..." -sudo apt install -y gcc +sudo apt-get install -y gcc && sudo apt-get install -y graphviz && sudo apt-get install -y graphviz-dev sudo apt install -y libpq-dev python3-dev python -m venv ./env diff --git a/scripts/build-and-tag-images.sh b/scripts/build-and-tag-images.sh new file mode 100755 index 000000000..679485d79 --- /dev/null +++ b/scripts/build-and-tag-images.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +if [ "$#" -ne 8 ]; then + echo "Error, this script expects 8 parameters." + echo "I.e: ./build-tag-images.sh BACKEND_APP_NAME FRONTEND_APP_NAME BACKEND_PATH FRONTEND_PATH BUILD_NUM COMMIT_HASH DOCKER_LOGIN DOCKER_USER" + exit 1 +fi + +BACKEND_APP_NAME=$1 +FRONTEND_APP_NAME=$2 +BACKEND_PATH=$3 +FRONTEND_PATH=$4 +BUILD_NUM=$5 +COMMIT_HASH=$6 +DOCKER_LOGIN=$7 +DOCKER_USER=$8 +BUILD_DATE=`date +%F` +TAG="${BUILD_DATE}_build-${BUILD_NUM}_${COMMIT_HASH}" + +export DOCKER_CLI_EXPERIMENTAL=enabled + +build_and_tag() { + echo "$DOCKER_LOGIN" | docker login https://tdp-docker.dev.raftlabs.tech -u $DOCKER_USER --password-stdin + docker buildx build --load --platform linux/amd64 -t tdp-docker.dev.raftlabs.tech/$BACKEND_APP_NAME:$TAG -t tdp-docker.dev.raftlabs.tech/$BACKEND_APP_NAME:latest "$BACKEND_PATH" + docker buildx build --load --platform linux/arm64 -t tdp-docker.dev.raftlabs.tech/$BACKEND_APP_NAME:$TAG -t tdp-docker.dev.raftlabs.tech/$BACKEND_APP_NAME:latest "$BACKEND_PATH" + docker push --all-tags tdp-docker.dev.raftlabs.tech/$BACKEND_APP_NAME + docker buildx build --load --platform linux/amd64 -t tdp-docker.dev.raftlabs.tech/$FRONTEND_APP_NAME:$TAG -t tdp-docker.dev.raftlabs.tech/$FRONTEND_APP_NAME:latest "$FRONTEND_PATH" + docker buildx build --load --platform linux/arm64 -t tdp-docker.dev.raftlabs.tech/$FRONTEND_APP_NAME:$TAG -t tdp-docker.dev.raftlabs.tech/$FRONTEND_APP_NAME:latest "$FRONTEND_PATH" + docker push --all-tags tdp-docker.dev.raftlabs.tech/$FRONTEND_APP_NAME + docker logout +} + +echo "Building and Tagging images for $BACKEND_APP_NAME and $FRONTEND_APP_NAME" +build_and_tag diff --git a/scripts/deploy-backend.sh b/scripts/deploy-backend.sh index ebbce8243..a1e3f2583 100755 --- a/scripts/deploy-backend.sh +++ b/scripts/deploy-backend.sh @@ -99,6 +99,10 @@ update_kibana() cf add-network-policy "$CGAPPNAME_BACKEND" "$CGAPPNAME_KIBANA" --protocol tcp --port 5601 cf add-network-policy "$CGAPPNAME_FRONTEND" "$CGAPPNAME_KIBANA" --protocol tcp --port 5601 cf add-network-policy "$CGAPPNAME_KIBANA" "$CGAPPNAME_FRONTEND" --protocol tcp --port 80 + + # Upload dashboards to Kibana + CMD="curl -X POST $CGAPPNAME_KIBANA.apps.internal:5601/api/saved_objects/_import -H 'kbn-xsrf: true' --form file=@/home/vcap/app/tdpservice/search_indexes/kibana_saved_objs.ndjson" + cf run-task $CGAPPNAME_BACKEND --command "$CMD" --name kibana-obj-upload } update_backend() diff --git a/tdrs-backend/Dockerfile b/tdrs-backend/Dockerfile index e8233528d..854151968 100644 --- a/tdrs-backend/Dockerfile +++ b/tdrs-backend/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.10.8-slim-buster +FROM python:3.10.8-slim-bullseye ENV PYTHONUNBUFFERED 1 ARG user=tdpuser @@ -17,7 +17,7 @@ RUN apt-get -y upgrade # Postgres client setup RUN apt --purge remove postgresql postgresql-* && apt install -y postgresql-common curl ca-certificates && install -d 
/usr/share/postgresql-common/pgdg && \ curl -o /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc --fail https://www.postgresql.org/media/keys/ACCC4CF8.asc && \ -sh -c 'echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' && \ +sh -c 'echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt bullseye-pgdg main" > /etc/apt/sources.list.d/pgdg.list' && \ apt -y update && apt install postgresql-client-15 -y # Install packages: RUN apt install -y gcc graphviz graphviz-dev libpq-dev python3-dev vim diff --git a/tdrs-backend/Pipfile b/tdrs-backend/Pipfile index a8c35b602..412321a7d 100644 --- a/tdrs-backend/Pipfile +++ b/tdrs-backend/Pipfile @@ -59,6 +59,8 @@ cerberus = "==1.3.5" xlsxwriter = "==3.2.0" openpyxl = "==3.1.2" sendgrid = "==6.11.0" +django_prometheus = "==2.3.1" +sentry-sdk = "==2.11.0" [requires] python_version = "3.10.8" \ No newline at end of file diff --git a/tdrs-backend/Pipfile.lock b/tdrs-backend/Pipfile.lock index f6ca1a836..7883b6f80 100644 --- a/tdrs-backend/Pipfile.lock +++ b/tdrs-backend/Pipfile.lock @@ -482,6 +482,14 @@ ], "version": "==0.2.7" }, + "django-prometheus": { + "hashes": [ + "sha256:cf9b26f7ba2e4568f08f8f91480a2882023f5908579681bcf06a4d2465f12168", + "sha256:f9c8b6c780c9419ea01043c63a437d79db2c33353451347894408184ad9c3e1e" + ], + "index": "pypi", + "version": "==2.3.1" + }, "django-storages": { "hashes": [ "sha256:31f263389e95ce3a1b902fb5f739a7ed32895f7d8b80179fe7453ecc0dfe102e", diff --git a/tdrs-backend/docker-compose.yml b/tdrs-backend/docker-compose.yml index 81d7065c4..3330ae493 100644 --- a/tdrs-backend/docker-compose.yml +++ b/tdrs-backend/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.4" services: zaproxy: - image: softwaresecurityproject/zap-stable:2.14.0 + image: tdp-docker.dev.raftlabs.tech/dependencies/softwaresecurityproject/zap-stable:2.14.0 command: sleep 3600 depends_on: - web @@ -12,7 +12,7 @@ services: - ../scripts/zap-hook.py:/zap/scripts/zap-hook.py:ro postgres: - image: postgres:15.7 + image: tdp-docker.dev.raftlabs.tech/dependencies/postgres:15.7 environment: - PGDATA=/var/lib/postgresql/data/ - POSTGRES_DB=tdrs_test @@ -25,14 +25,14 @@ services: - postgres_data:/var/lib/postgresql/data/:rw clamav-rest: - image: rafttech/clamav-rest:0.103.2 + image: tdp-docker.dev.raftlabs.tech/dependencies/rafttech/clamav-rest:0.103.2 environment: - MAX_FILE_SIZE=200M ports: - "9000:9000" localstack: - image: localstack/localstack:0.13.3 + image: tdp-docker.dev.raftlabs.tech/dependencies/localstack/localstack:0.13.3 environment: - SERVICES=s3 - DATA_DIR=/tmp/localstack/data @@ -46,7 +46,7 @@ services: - ../scripts/localstack-setup.sh:/docker-entrypoint-initaws.d/localstack-setup.sh kibana: - image: docker.elastic.co/kibana/kibana-oss:7.10.2 + image: tdp-docker.dev.raftlabs.tech/dependencies/docker.elastic.co/kibana/kibana-oss:7.10.2 ports: - 5601:5601 environment: @@ -55,11 +55,13 @@ services: - SERVER_BASEPATH=/kibana - SERVER_SECURITYRESPONSEHEADERS_REFERRERPOLICY=no-referrer - CSP_WARNLEGACYBROWSERS=false + volumes: + - ./search_indexes/kibana_saved_objs.ndjson:/usr/share/kibana/kibana_saved_objs.ndjson depends_on: - elastic elastic: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2 + image: tdp-docker.dev.raftlabs.tech/dependencies/docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2 environment: - discovery.type=single-node - 
logger.discovery.level=debug @@ -69,6 +71,71 @@ services: volumes: - elastic_data:/usr/share/elasticsearch/data + grafana: + restart: always + image: grafana/grafana:11.2.0 + ports: + - 9400:9400 + volumes: + - ./plg/grafana/datasources.yml:/etc/grafana/provisioning/datasources/default.yml + - ./plg/grafana/dashboards/provider.yml:/etc/grafana/provisioning/dashboards/default.yml + - ./plg/grafana/dashboards:/var/lib/grafana/provisioning/dashboards + - ./plg/grafana/custom.ini:/etc/grafana/grafana.ini + - grafana_data:/var/lib/grafana + + prometheus: + restart: always + image: prom/prometheus:v2.54.1 + ports: + - 9090:9090 + volumes: + - ./plg/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml + - ./plg/prometheus/django_rules.yml:/etc/prometheus/prom_django_rules.yml + - prometheus_data:/prometheus + depends_on: + - web + - celery-exporter + - postgres-exporter + + promtail: + restart: always + image: grafana/promtail:3.0.1 + ports: + - 9080:9080 + volumes: + - ./plg/promtail/config.yml:/etc/promtail/config.yml + - ~/tdp-logs/nginx:/var/log/nginx + - logs:/logs + command: -config.file=/etc/promtail/config.yml + + loki: + restart: always + image: grafana/loki:3.0.1 + ports: + - 3100:3100 + volumes: + - loki_data:/loki + + celery-exporter: + restart: always + image: ovalmoney/celery-exporter:1.5.1 + ports: + - 9540:9540 + environment: + - CELERY_EXPORTER_BROKER_URL=redis://redis-server:6379/0 + depends_on: + - web + + postgres-exporter: + restart: always + image: prometheuscommunity/postgres-exporter:v0.15.0 + ports: + - 9187:9187 + environment: + - DATA_SOURCE_NAME=postgres://tdpuser:something_secure@postgres:5432/tdrs_test?sslmode=disable + depends_on: + - web + web: restart: always environment: @@ -97,7 +164,7 @@ services: - DJANGO_DEBUG - SENDGRID_API_KEY - GENERATE_TRAILER_ERRORS=True - - BYPASS_KIBANA_AUTH + - BYPASS_OFA_AUTH - ELASTICSEARCH_REINDEX_THREAD_COUNT - ELASTICSEARCH_REINDEX_CHUNK_SIZE - ELASTICSEARCH_REINDEX_REQUEST_TIMEOUT @@ -111,7 +178,8 @@ services: - ELASTICSEARCH_LOG_INDEX_SLOW_LEVEL volumes: - .:/tdpapp - image: tdp + - logs:/tdpapp + image: tdp-backend build: . 
command: > bash -c "./wait_for_services.sh && @@ -130,7 +198,7 @@ services: - elastic redis-server: - image: "redis:alpine" + image: tdp-docker.dev.raftlabs.tech/dependencies/redis:alpine command: redis-server /tdpapp/redis.conf ports: - "6379:6379" @@ -141,6 +209,10 @@ volumes: localstack_data: postgres_data: elastic_data: + prometheus_data: + grafana_data: + loki_data: + logs: networks: diff --git a/tdrs-backend/manifest.kibana.yml b/tdrs-backend/manifest.kibana.yml index 181b29ec0..da77a16d4 100644 --- a/tdrs-backend/manifest.kibana.yml +++ b/tdrs-backend/manifest.kibana.yml @@ -10,7 +10,7 @@ applications: SERVER_SECURITYRESPONSEHEADERS_REFERRERPOLICY: no-referrer CSP_WARNLEGACYBROWSERS: false docker: - image: docker.elastic.co/kibana/kibana-oss:7.4.2 + image: docker.elastic.co/kibana/kibana-oss:7.10.2 command: | export ELASTICSEARCH_HOSTS=http://$CGAPPNAME_PROXY.apps.internal:8080 && /usr/local/bin/dumb-init -- /usr/local/bin/kibana-docker diff --git a/tdrs-backend/plg/grafana/custom.ini b/tdrs-backend/plg/grafana/custom.ini new file mode 100644 index 000000000..b8124cdeb --- /dev/null +++ b/tdrs-backend/plg/grafana/custom.ini @@ -0,0 +1,1975 @@ +##################### Grafana Configuration Defaults ##################### +# +# Do not modify this file in grafana installs +# + +# possible values : production, development +app_mode = production + +# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty +instance_name = ${HOSTNAME} + +#################################### Paths ############################### +[paths] +# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) +data = data + +# Temporary files in `data` directory older than given duration will be removed +temp_data_lifetime = 24h + +# Directory where grafana can store logs +logs = data/log + +# Directory where grafana will automatically scan and look for plugins +plugins = data/plugins + +# folder that contains provisioning config files that grafana will apply on startup and while running. +provisioning = conf/provisioning + +#################################### Server ############################## +[server] +# Protocol (http, https, h2, socket) +protocol = http + +# Minimum TLS version allowed. By default, this value is empty. Accepted values are: TLS1.2, TLS1.3. If nothing is set TLS1.2 would be taken +min_tls_version = "" + +# The ip address to bind to, empty will bind to all interfaces +http_addr = + +# The http port to use +http_port = 9400 + +# The public facing domain name used to access grafana from a browser +domain = localhost + +# Redirect to correct domain if host header does not match domain +# Prevents DNS rebinding attacks +enforce_domain = false + +# The full public facing url +root_url = %(protocol)s://%(domain)s:%(http_port)s/grafana/ + +# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons. 
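+# TDP serves Grafana at the /grafana/ sub-path (see root_url above), so sub-path
+# serving is enabled here; a reverse proxy is assumed to forward /grafana/ requests.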
+serve_from_sub_path = true + +# Log web requests +router_logging = false + +# the path relative working path +static_root_path = public + +# enable gzip +enable_gzip = false + +# https certs & key file +cert_file = +cert_key = +cert_pass = + +# Certificates file watch interval +certs_watch_interval = + +# Unix socket gid +# Changing the gid of a file without privileges requires that the target group is in the group of the process and that the process is the file owner +# It is recommended to set the gid as http server user gid +# Not set when the value is -1 +socket_gid = -1 + +# Unix socket mode +socket_mode = 0660 + +# Unix socket path +socket = /tmp/grafana.sock + +# CDN Url +cdn_url = + +# Sets the maximum time in minutes before timing out read of an incoming request and closing idle connections. +# `0` means there is no timeout for reading the request. +read_timeout = 0 + +# This setting enables you to specify additional headers that the server adds to HTTP(S) responses. +[server.custom_response_headers] +#exampleHeader1 = exampleValue1 +#exampleHeader2 = exampleValue2 + +[environment] +# Sets whether the local file system is available for Grafana to use. Default is true for backward compatibility. +local_file_system_available = true + +#################################### GRPC Server ######################### +[grpc_server] +network = "tcp" +address = "127.0.0.1:10000" +use_tls = false +cert_file = +key_file = +# this will log the request and response for each unary gRPC call +enable_logging = false + +# Maximum size of a message that can be received in bytes. If not set, uses the gRPC default (4MiB). +max_recv_msg_size = + +# Maximum size of a message that can be sent in bytes. If not set, uses the gRPC default (unlimited). +max_send_msg_size = + +#################################### Database ############################ +[database] +# You can configure the database connection by specifying type, host, name, user and password +# as separate properties or as on string using the url property. + +# Either "mysql", "postgres" or "sqlite3", it's your choice +type = sqlite3 +host = 127.0.0.1:3306 +name = grafana +user = root +# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;""" +password = +# Use either URL or the previous fields to configure the database +# Example: mysql://user:secret@host:port/database +url = + +# Max idle conn setting default is 2 +max_idle_conn = 2 + +# Max conn setting default is 0 (mean not set) +max_open_conn = + +# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours) +conn_max_lifetime = 14400 + +# Set to true to log the sql calls and execution times. +log_queries = + +# For "postgres", use either "disable", "require" or "verify-full" +# For "mysql", use either "true", "false", or "skip-verify". +ssl_mode = disable + +# For "postgres", use either "1" to enable or "0" to disable SNI +ssl_sni = + +# Database drivers may support different transaction isolation levels. +# Currently, only "mysql" driver supports isolation levels. +# If the value is empty - driver's default isolation level is applied. +# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE". +isolation_level = + +ca_cert_path = +client_key_path = +client_cert_path = +server_cert_name = + +# For "sqlite3" only, path relative to data_path setting +path = grafana.db + +# For "sqlite3" only. cache mode setting used for connecting to the database +cache_mode = private + +# For "sqlite3" only. 
Enable/disable Write-Ahead Logging, https://sqlite.org/wal.html. Default is false. +wal = false + +# For "mysql" and "postgres". Lock the database for the migrations, default is true. +migration_locking = true + +# For "mysql" and "postgres" only if migrationLocking is set. How many seconds to wait before failing to lock the database for the migrations, default is 0. +locking_attempt_timeout_sec = 0 + +# For "sqlite" only. How many times to retry query in case of database is locked failures. Default is 0 (disabled). +query_retries = 0 + +# For "sqlite" only. How many times to retry transaction in case of database is locked failures. Default is 5. +transaction_retries = 5 + +# Set to true to add metrics and tracing for database queries. +instrument_queries = false + +#################################### Cache server ############################# +[remote_cache] +# Either "redis", "memcached" or "database" default is "database" +type = database + +# cache connectionstring options +# database: will use Grafana primary database. +# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'. +# memcache: 127.0.0.1:11211 +connstr = + +# prefix prepended to all the keys in the remote cache +prefix = + +# This enables encryption of values stored in the remote cache +encryption = + +#################################### Data proxy ########################### +[dataproxy] + +# This enables data proxy logging, default is false +logging = false + +# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds. +# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set. +timeout = 30 + +# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds. +dialTimeout = 10 + +# How many seconds the data proxy waits before sending a keepalive request. +keep_alive_seconds = 30 + +# How many seconds the data proxy waits for a successful TLS Handshake before timing out. +tls_handshake_timeout_seconds = 10 + +# How many seconds the data proxy will wait for a server's first response headers after +# fully writing the request headers if the request has an "Expect: 100-continue" +# header. A value of 0 will result in the body being sent immediately, without +# waiting for the server to approve. +expect_continue_timeout_seconds = 1 + +# Optionally limits the total number of connections per host, including connections in the dialing, +# active, and idle states. On limit violation, dials will block. +# A value of zero (0) means no limit. +max_conns_per_host = 0 + +# The maximum number of idle connections that Grafana will keep alive. +max_idle_connections = 100 + +# How many seconds the data proxy keeps an idle connection open before timing out. +idle_conn_timeout_seconds = 90 + +# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request. +send_user_header = false + +# Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests. +response_limit = 0 + +# Limits the number of rows that Grafana will process from SQL data sources. +row_limit = 1000000 + +# Sets a custom value for the `User-Agent` header for outgoing data proxy requests. If empty, the default value is `Grafana/` (for example `Grafana/9.0.0`). 
+user_agent = + +#################################### Analytics ########################### +[analytics] +# Server reporting, sends usage counters to stats.grafana.org every 24 hours. +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard and error counts. It is very helpful to us. +# Change this option to false to disable reporting. +reporting_enabled = true + +# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs +reporting_distributor = grafana-labs + +# Set to false to disable all checks to https://grafana.com +# for new versions of grafana. The check is used +# in some UI views to notify that a grafana update exists. +# This option does not cause any auto updates, nor send any information +# only a GET request to https://grafana.com/api/grafana/versions/stable to get the latest version. +check_for_updates = true + +# Set to false to disable all checks to https://grafana.com +# for new versions of plugins. The check is used +# in some UI views to notify that a plugin update exists. +# This option does not cause any auto updates, nor send any information +# only a GET request to https://grafana.com to get the latest versions. +check_for_plugin_updates = true + +# Google Analytics universal tracking code, only enabled if you specify an id here +google_analytics_ua_id = + +# Google Analytics 4 tracking code, only enabled if you specify an id here +google_analytics_4_id = + +# When Google Analytics 4 Enhanced event measurement is enabled, we will try to avoid sending duplicate events and let Google Analytics 4 detect navigation changes, etc. +google_analytics_4_send_manual_page_views = false + +# Google Tag Manager ID, only enabled if you specify an id here +google_tag_manager_id = + +# Rudderstack write key, enabled only if rudderstack_data_plane_url is also set +rudderstack_write_key = + +# Rudderstack data plane url, enabled only if rudderstack_write_key is also set +rudderstack_data_plane_url = + +# Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set +rudderstack_sdk_url = + +# Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config +rudderstack_config_url = + +# Rudderstack Integrations URL, optional. Only valid if you pass the SDK version 1.1 or higher +rudderstack_integrations_url = + +# Intercom secret, optional, used to hash user_id before passing to Intercom via Rudderstack +intercom_secret = + +# Application Insights connection string. Specify an URL string to enable this feature. +application_insights_connection_string = + +# Optional. Specifies an Application Insights endpoint URL where the endpoint string is wrapped in backticks ``. 
+application_insights_endpoint_url = + +# Controls if the UI contains any links to user feedback forms +feedback_links_enabled = true + +# Static context that is being added to analytics events +reporting_static_context = + +#################################### Security ############################ +[security] +# disable creation of admin user on first start of grafana +disable_initial_admin_creation = false + +# default admin user, created on startup +admin_user = admin + +# default admin password, can be changed before first start of grafana, or in profile settings +admin_password = admin + +# default admin email, created on startup +admin_email = admin@localhost + +# used for signing +secret_key = SW2YcwTIb9zpOOhoPsMm + +# current key provider used for envelope encryption, default to static value specified by secret_key +encryption_provider = secretKey.v1 + +# list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1 +available_encryption_providers = + +# disable gravatar profile images +disable_gravatar = false + +# data source proxy whitelist (ip_or_domain:port separated by spaces) +data_source_proxy_whitelist = + +# disable protection against brute force login attempts +disable_brute_force_login_protection = false + +# set to true if you host Grafana behind HTTPS. default is false. +cookie_secure = false + +# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled" +cookie_samesite = lax + +# set to true if you want to allow browsers to render Grafana in a ,