diff --git a/Taskfile.yml b/Taskfile.yml index 7e0c48e48..525ea16fa 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -5,16 +5,66 @@ tasks: create-network: desc: Create the external network cmds: - - docker network create external-net + - (docker network create external-net) || true init-backend: desc: Initialize the backend project dir: tdrs-backend cmds: + - task: create-network - docker-compose -f docker-compose.yml up -d --build - docker-compose -f docker-compose.yml exec web sh -c "python ./manage.py makemigrations" - docker-compose -f docker-compose.yml exec web sh -c "python ./manage.py migrate" - docker-compose -f docker-compose.yml down + - task: sentry-down + + clone-sentry-repo: + desc: Clone the sentry repo + dir: sentry + cmds: + - git clone https://github.com/getsentry/self-hosted.git || true + + + create-sentry: + desc: Create Sentry service + dir: sentry + cmds: + # limiting the memory to 2GB and CPU to only one cpu @0, for faster response, you can remove the limittask : --cpuset-cpus 0 + - (docker run --privileged -p 9001:9000 -d --memory="8g" --memory-swap="8g" --name sentry docker:dind) || true + - docker exec sentry sh -c "git clone https://github.com/getsentry/self-hosted.git || true" + + # need sleep 10 for docker to start + # there is a bug with other version of self-hosted. 
looks like they are trying to upgrade to Django 5.0 (July 2024) + - docker exec sentry sh -c "cd self-hosted && sleep 10 && git checkout tags/23.10.1" + + # add bash + - docker exec sentry sh -c "apk add bash" + - docker cp docker-compose.yml sentry:/self-hosted/docker-compose.yml + - docker cp .env sentry:/self-hosted/.env + - docker exec sentry bash -c "cd self-hosted && ./install.sh --skip-user-creation --no-report-self-hosted-issues" + # create a new user + - docker exec sentry bash -c "cd self-hosted && docker-compose run --rm web createuser --email admin@tanf.com --password admin --superuser" + # copy backup.json file to sentry + - docker cp backup.json sentry:/self-hosted/sentry/backup.json + # restore backup + - docker exec sentry bash -c "cd self-hosted && docker compose up -d" + - docker exec sentry bash -c "docker cp /self-hosted/sentry/backup.json sentry-self-hosted-web-1:/home/sentry/backup.json" + - docker exec sentry bash -c "docker exec sentry-self-hosted-web-1 bash -c 'sentry import /home/sentry/backup.json'" + - docker exec sentry bash -c "cd self-hosted && docker compose down" + - docker exec sentry bash -c "cd self-hosted && docker compose up -d" + + + sentry-up: + desc: Start sentry service + dir: sentry + cmds: + - docker exec sentry bash -c "cd self-hosted && docker-compose up -d" + + sentry-down: + desc: Stop sentry service + dir: sentry + cmds: + - docker exec sentry bash -c "cd self-hosted && docker-compose down" drop-db: desc: Drop the backend database @@ -87,6 +137,7 @@ tasks: desc: Run flake8 in the backend container dir: tdrs-backend cmds: + - task backend-up - docker-compose -f docker-compose.yml exec web sh -c "flake8 . && if [ $? 
-eq 0 ]; then echo 'Flake8 linter found no issues'; fi" backend-pip-lock: @@ -94,6 +145,7 @@ tasks: desc: Lock the pip dependencies dir: tdrs-backend cmds: + - task: backend-up - docker-compose -f docker-compose.yml exec web sh -c "pipenv lock" psql: @@ -108,9 +160,10 @@ tasks: clean: desc: Remove all containers, networks, and volumes cmds: - - docker-compose -f tdrs-backend/docker-compose.yml down -v - - docker-compose -f tdrs-frontend/docker-compose.yml down -v - - docker system prune -f -a + - docker stop $(docker ps -aq) || true + - docker rm $(docker ps -aq) || true + - docker rmi $(docker images -q) || true + - docker volume rm $(docker volume ls -q) || true clamav-up: desc: Start clamav service diff --git a/docs/Sprint-Review/sprint-104-summary.md b/docs/Sprint-Review/sprint-104-summary.md new file mode 100644 index 000000000..93beb6ef8 --- /dev/null +++ b/docs/Sprint-Review/sprint-104-summary.md @@ -0,0 +1,82 @@ +# sprint-104-summary + +7/17/2024 - 7/30/2024 + +### Sprint Goal + +**Dev:** + +_**Plain Language Error Messaging and Application Health Monitoring work, improved dev tooling, and fixing bugs**_ + +* \#2792 — \[Error Audit] Category 3 error messages clean-up +* \#3059 — Bug: file stuck in pending state when DOB or SSN field is space-filled +* \#2965 — As tech lead, I want a database seed implemented for testing +* \#2175 — \[Bug] Data Files “Download” button(s) disappear when clicked +* \#3055 — Service timeout blocks parsing completion +* \#3061 — \[a11y fix] Django multi-select filter +* \#2960 — As a engineer I need to replace bash script with task file for local dev + +**DevOps:** + +_**Successful deployments across environments and pipeline stability investments**_ + +* \#2458 — Integrate Nexus into CircleCI +* \#3043 — Sentry: Local environment for Debugging +* \#2526 — "nightly" owasp scan after qasp deployment +* \#1623 — As tech lead, I want CircleCI pipelines to catch migration and/or deployment failures + +**Design:** + +_**Support 
reviews, Finalize Django Admin Experience epic research, Draft research synthesis**_ + +* \#2910 — Django Admin Experience Improvements Research Session (Part 2) +* \#3078 — DIGIT Admin Experience Synthesis + +## Tickets + +### Completed/Merged + +* [#1620 \[SPIKE\] As tech lead, I need to know the real-time branches deployed in Cloud.gov spaces](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/1620) +* [#2910 \[Research Facilitation\] Admin Experience Improvements](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2910) +* [#2960 As a engineer I need to replace bash script with task file for local dev](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2960) +* [#3004 Implement (small) data lifecycle (backup/archive ES)](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3004) +* [#3016 Spike - Cat2 Validator Improvement](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3016) +* [#3049 as an STT user, I need the error message related to the header update indicator clarified](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3049) +* [#3059 Bug: file stuck in pending state when DOB or SSN field is space-filled](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3059) + +### Submitted (QASP Review, OCIO Review) + +* [#3055 Service timeout blocks parsing completion](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3055) +* [#3061 \[a11y fix\] Django multi-select filter ](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3061) +* [#1621 As a TDP user, I'd like to see a descriptive error message page if authentication 
source is unavailable.](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/1621) +* [#2996 Add dynamic field name to cat4 error messages](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2996) +* [#3058 \[Design Deliverable\] Release notes email template](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3058) +* [#3057 \[Design Deliverable\] Spec for light-lift fiscal quarter / calendar quarter explainer in TDP](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3057) +* [#2985 \[Design Deliverable\] Email spec for Admin Notification for stuck files](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2985) +* [#2883 Pre-Made Reporting Dashboards on Kibana](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2883) +* [#2954 Extend SESSION\_COOKIE\_AGE](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2954) +* [#2993 Kibana Dashboard MVP](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2993) + +### Ready to Merge + +* + +### Closed (Not Merged) + +* [#2526 "nightly" owasp scan after qasp deployment](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2526) +* [#1623 As tech lead, I want CircleCI pipelines to catch migration and/or deployment failures](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/1623) + +### Moved to Next Sprint + +**In Progress** + +* [#2792 \[Error Audit\] Category 3 error messages clean-up](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2792) + +#### Blocked + +* + +**Raft Review** + +* 
[#3043 Sentry: Local environment for Debugging](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3043) +* [\[Research Synthesis\] DIGIT Admin Experience Improvements#3078](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3078) diff --git a/docs/Sprint-Review/sprint-105-summary.md b/docs/Sprint-Review/sprint-105-summary.md new file mode 100644 index 000000000..b9b57f9ef --- /dev/null +++ b/docs/Sprint-Review/sprint-105-summary.md @@ -0,0 +1,92 @@ +# sprint-105-summary + +7/31/2024 - 8/14/2024 + +### Priority Setting + +* Reparsing + * Tickets: + * \#3064 — Re-parse Meta Model + * \#3113 — As tech lead, I need the validation on the header update indicator revised to unblock parsing + * \#3073 — \[bug] TDP is raising cat 4 error on TANF/SSP closed case files that is not present +* System Monitoring +* DIGIT Work + +### Sprint Goal + +**Dev:** + +_**Plain Language Error Messaging and Application Health Monitoring work, improved dev tooling, and fixing bugs**_ + +* \#2792 — \[Error Audit] Category 3 error messages clean-up +* \#2965 — As tech lead, I want a database seed implemented for testing +* \#3064 — Re-parse Meta Model +* \#3113 — As tech lead, I need the validation on the header update indicator revised to unblock parsing +* \#3073 — \[bug] TDP is raising cat 4 error on TANF/SSP closed case files that is not present +* \#3062 — bug: ES docker image for non-dev spaces stored in personal dockerhub +* \#1646 — \[A11y Fix] Correct TDP home : aria label mismatch + +**DevOps:** + +_**Successful deployments across environments and pipeline stability investments**_ + +* \#2458 — Integrate Nexus into CircleCI + +**Design:** + +_**Support reviews, Complete Research Synthesis, Continue Error Audit (Cat 4)**_ + +* \#3078 — DIGIT Admin Experience Synthesis +* \#3114 — \[Design Spike] In-app banner for submission history pages +* \#2968 — \[Design Deliverable] Update Error 
Audit for Cat 4 / QA + +## Tickets + +### Completed/Merged + +* [#1621 As a TDP user, I'd like to see a descriptive error message page if authentication source is unavailable.](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/1621) +* [#1646 \[A11y Fix\] Correct TDP home : aria label mismatch](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/1646) +* [#3033 As tech lead, I need the sections 3 and 4 calendar quarter logic updated](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3033) +* [#3055 Service timeout blocks parsing completion](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3055) +* [#3057 \[Design Deliverable\] Spec for light-lift fiscal quarter / calendar quarter explainer in TDP](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3057) +* [#3113 As tech lead, I need the validation on the header update indicator revised to unblock parsing ](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3113) + +### Submitted (QASP Review, OCIO Review) + +* [#2954 Extend SESSION\_COOKIE\_AGE](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2954) +* [#3061 \[a11y fix\] Django multi-select filter ](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3061) +* [#3079 DB Backup Script Fix](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3079) +* [#2883 Pre-Made Reporting Dashboards on Kibana](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2883) +* [#2985 \[Design Deliverable\] Email spec for Admin Notification for stuck 
files](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2985) +* [#2996 Add dynamic field name to cat4 error messages](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2996) +* [#2993 Kibana Dashboard MVP](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2993) + +### Ready to Merge + +* [#3058 \[Design Deliverable\] Release notes email template](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3058) +* [#3062 bug: ES docker image for non-dev spaces stored in personal dockerhub](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3062) +* [#3073 \[bug\] TDP is raising cat 4 error on TANF/SSP closed case files that is not present](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3073) +* [#3107 \[Re-parse command\] Retain original submission date when command runs](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3107) + +### Closed (Not Merged) + +* [#1355 Research questions around DIGIT teams query usage for parsed data](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/1355) + +### Moved to Next Sprint + +**In Progress** + +* [#2965 As tech lead, I want a database seed implemented for testing](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2965) + +#### Blocked + +* [#2458 Integrate Nexus into CircleCI](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2458) + +**Raft Review** + +* [#3043 Sentry: Local environment for Debugging](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3043) +* [#3064 Re-parse Meta 
Model](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3064) +* [#3065 Spike - Guarantee Sequential Execution of Re-parse Command](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3065) +* [#3078 \[Research Synthesis\] DIGIT Admin Experience Improvements](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3078) +* [#3087 Admin By Newest Filter Enhancements for Data Files Page](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/3087) +* [#2792 \[Error Audit\] Category 3 error messages clean-up](https://app.zenhub.com/workspaces/sprint-board-5f18ab06dfd91c000f7e682e/issues/gh/raft-tech/tanf-app/2792) diff --git a/docs/Technical-Documentation/clean-and-reparse.md b/docs/Technical-Documentation/clean-and-reparse.md index 92175ab02..34fdd80eb 100644 --- a/docs/Technical-Documentation/clean-and-reparse.md +++ b/docs/Technical-Documentation/clean-and-reparse.md @@ -1,13 +1,13 @@ -# Clean and Re-parse DataFiles +# Clean and Reparse DataFiles ## Background As TDP has evolved so has it's validation mechanisms, messages, and expansiveness. As such, many of the datafiles locked in the database and S3 have not undergone TDP's latest and most stringent validation processes. Because data quality is so important to all TDP stakeholders -we wanted to introduce a way to re-parse and subsequently re-validate datafiles that have already been submitted to TDP to enhance the integrity +we wanted to introduce a way to reparse and subsequently re-validate datafiles that have already been submitted to TDP to enhance the integrity and the quality of the submissions. The following lays out the process TDP takes to automate and execute this process, and how this process can be tested locally and in our deployed environments. 
-# Clean and Re-parse Flow +# Clean and Reparse Flow As a safety measure, this process must ALWAYS be executed manually by a system administrator. Once executed, all processes thereafter are completely automated. The steps below outline how this process executes. @@ -24,7 +24,7 @@ automated. The steps below outline how this process executes. 10. `clean_and_reparse` re-saves the selected datafiles to the database. 11. `clean_and_reparse` pushes a new `parser_task` onto the Redis queue for each of the selected datafiles. -## Local Clean and Re-parse +## Local Clean and Reparse Make sure you have submitted a few datafiles, ideally accross program types and fiscal timeframes. 1. Browse the [indices](http://localhost:9200/_cat/indices/?pretty&v&s=index) and the DAC and verify the indices reflect the document counts you expect and the DAC reflects the record counts you expect. @@ -53,70 +53,32 @@ The commands should ALWAYS be executed in the order they appear below. 1. curl -X DELETE 'http://localhost:9200/dev*' 2. python manage.py search_index --rebuild -#### Clean and Re-parse All with New Indices and Keeping Old Indices +#### Clean and Reparse All with New Indices and Keeping Old Indices 1. Execute `python manage.py clean_and_reparse -a -n` - If this is the first time you're executing a command with new indices, because we have to create an alias in Elastic with the same name as the original index i.e. (`dev_tanf_t1_submissions`), the old indices no matter whether you specified `-d` or not will be deleted. From thereafter, the command will always respect the `-d` switch. 2. Expected Elastic results. - - If this is the first time you have ran the command the [indices](http://localhost:9200/_cat/indices/?pretty&v&s=index) url should reflect 21 indices prefixed with `dev` and they should contain the same number of documents as the original indices did. The new indices will also have a datetime suffix indicating when the re-parse occurred. 
+ - If this is the first time you have ran the command the [indices](http://localhost:9200/_cat/indices/?pretty&v&s=index) url should reflect 21 indices prefixed with `dev` and they should contain the same number of documents as the original indices did. The new indices will also have a datetime suffix indicating when the reparse occurred. - If this is the second time running this command the [indices](http://localhost:9200/_cat/indices/?pretty&v&s=index) url should reflect 42 indices prefixed with `dev` and they should each contain the same number of documents as the original indices did. The latest indices will have a new datetime suffix delineating them from the other indices. 3. Expected DAC results. - The DAC record counts should be exactly the same no matter how many times the command is run. - The primary key for all reparsed datafiles should no longer be the same. - `ParserError` and `DataFileSummary` objects should be consistent with the file. -#### Clean and Re-parse All with New Indices and Deleting Old Indices -1. Execute `python manage.py clean_and_reparse -a -n -d` -2. The expected results for this command will be exactly the same as above. The only difference is that no matter how many times you execute this command, you should only see 21 indices in Elastic with the `dev` prefix. - -#### Clean and Re-parse All with Same Indices +#### Clean and Reparse All 1. Execute `python manage.py clean_and_reparse -a` -2. The expected results for this command will match the initial result from above. 
- -``` -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open .kibana_1 VKeA-BPcSQmJJl_AbZr8gQ 1 0 1 0 4.9kb 4.9kb -yellow open dev_ssp_m1_submissions mDIiQxJrRdq0z7W9H_QUYg 1 1 5 0 24kb 24kb -yellow open dev_ssp_m2_submissions OUrgAN1XRKOJgJHwr4xm7w 1 1 6 0 33.6kb 33.6kb -yellow open dev_ssp_m3_submissions 60fCBXHGTMK31MyWw4t2gQ 1 1 8 0 32.4kb 32.4kb -yellow open dev_tanf_t1_submissions 19f_lawWQKSeuwejo2Qgvw 1 1 817 0 288.2kb 288.2kb -yellow open dev_tanf_t2_submissions dPj2BdNtSJyAxCqnMaV2aw 1 1 884 0 414.4kb 414.4kb -yellow open dev_tanf_t3_submissions e7bEl0AURPmcZ5kiFwclcA 1 1 1380 0 355.2kb 355.2kb -``` - -#### Clean and Re-parse FY 2024 New Indices and Keep Old Indices -1. Execute `python manage.py clean_and_reparse -y 2024 -n` -2. The expected results here are much different with respect to Elastic. Again, Postgres is the ground truth and it's counts should never change. Because this is the first time we execute this command and therfore are creating our Elastic aliases the result returned from the [indices](http://localhost:9200/_cat/indices/?pretty&v&s=index) url might be confusing. See below. - -``` -index docs.count -.kibana_1 2 -dev_ssp_m1_submissions_2024-07-05_17.26.26 5 -dev_ssp_m2_submissions_2024-07-05_17.26.26 6 -dev_ssp_m3_submissions_2024-07-05_17.26.26 8 -dev_tanf_t1_submissions_2024-07-05_17.26.26 2 -dev_tanf_t2_submissions_2024-07-05_17.26.26 2 -dev_tanf_t3_submissions_2024-07-05_17.26.26 4 -``` - -- While the DAC reports the correct number of records for all submitted types, Elastic does not. This is because we only reparsed a subset of the entire collection of datafiles for the first time we executed the `clean_and_reparse` command. Therefore, Elastic only has documents for the subset of resubmitted files. 
If we had already executed the command: `python manage.py clean_and_reparse -a -n` and then executed `python manage.py clean_and_reparse -y 2024 -n`, we would see what you might have initially expected to see. +2. The expected results for this command will be exactly the same as above. The only difference is that no matter how many times you execute this command, you should only see 21 indices in Elastic with the `dev` prefix. ``` -index docs.count -.kibana_1 2 -dev_ssp_m1_submissions_2024-07-05_17.34.34 5 -dev_ssp_m1_submissions_2024-07-05_17.35.26 5 -dev_ssp_m2_submissions_2024-07-05_17.34.34 6 -dev_ssp_m2_submissions_2024-07-05_17.35.26 6 -dev_ssp_m3_submissions_2024-07-05_17.34.34 8 -dev_ssp_m3_submissions_2024-07-05_17.35.26 8 -dev_tanf_t1_submissions_2024-07-05_17.34.34 817 -dev_tanf_t1_submissions_2024-07-05_17.35.26 2 -dev_tanf_t2_submissions_2024-07-05_17.34.34 884 -dev_tanf_t2_submissions_2024-07-05_17.35.26 2 -dev_tanf_t3_submissions_2024-07-05_17.34.34 1380 -dev_tanf_t3_submissions_2024-07-05_17.35.26 4 +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +green open .kibana_1 VKeA-BPcSQmJJl_AbZr8gQ 1 0 1 0 4.9kb 4.9kb +yellow open dev_ssp_m1_submissions_2024-07-05_17.26.26 mDIiQxJrRdq0z7W9H_QUYg 1 1 5 0 24kb 24kb +yellow open dev_ssp_m2_submissions_2024-07-05_17.26.26 OUrgAN1XRKOJgJHwr4xm7w 1 1 6 0 33.6kb 33.6kb +yellow open dev_ssp_m3_submissions_2024-07-05_17.26.26 60fCBXHGTMK31MyWw4t2gQ 1 1 8 0 32.4kb 32.4kb +yellow open dev_tanf_t1_submissions_2024-07-05_17.26.26 19f_lawWQKSeuwejo2Qgvw 1 1 817 0 288.2kb 288.2kb +yellow open dev_tanf_t2_submissions_2024-07-05_17.26.26 dPj2BdNtSJyAxCqnMaV2aw 1 1 884 0 414.4kb 414.4kb +yellow open dev_tanf_t3_submissions_2024-07-05_17.26.26 e7bEl0AURPmcZ5kiFwclcA 1 1 1380 0 355.2kb 355.2kb ``` ## Cloud.gov Examples @@ -131,7 +93,7 @@ Running the `clean_and_reparse` command in a Cloud.gov environment will require ## OFA Admin Backend App Login -### 0. Disconnect from VPN. +### 0. 
Disconnect from VPN. ### 1. Authenticate with Cloud.gov API endpoint: api.fr.cloud.gov @@ -172,7 +134,7 @@ space: tanf-dev 1. Get the app GUID ```bash $ cf curl v3/apps/$(cf app tdp-backend-qasp --guid)/processes | jq --raw-output '.resources | .[]? | select(.type == "web").guid' - + ``` @@ -201,23 +163,21 @@ space: tanf-dev $ /tmp/lifecycle/shell ``` -### 4. Display Help for Re-parse Command +### 4. Display Help for Reparse Command ```bash $ python manage.py clean_and_reparse -h usage: manage.py clean_and_parse [-h] [-q {Q1,Q2,Q3,Q4}] [-y FISCAL_YEAR] [-a] [-n] [-d] [--configuration CONFIGURATION] [--version] [-v {0,1,2,3}] [--settings SETTINGS] [--pythonpath PYTHONPATH] [--traceback] [--no-color] [--force-color] [--skip-checks] -Delete and re-parse a set of datafiles. All re-parsed data will be moved into a new set of Elastic indexes. +Delete and reparse a set of datafiles. All reparsed data will be moved into a new set of Elastic indexes. options: -h, --help show this help message and exit -q {Q1,Q2,Q3,Q4}, --fiscal_quarter {Q1,Q2,Q3,Q4} - Re-parse all files in the fiscal quarter, e.g. Q1. + Reparse all files in the fiscal quarter, e.g. Q1. -y FISCAL_YEAR, --fiscal_year FISCAL_YEAR - Re-parse all files in the fiscal year, e.g. 2021. - -a, --all Clean and re-parse all datafiles. If selected, fiscal_year/quarter aren't necessary. - -n, --new_indices Move re-parsed data to new Elastic indices. - -d, --delete_indices Requires new_indices. Delete the current Elastic indices. + Reparse all files in the fiscal year, e.g. 2021. + -a, --all Clean and reparse all datafiles. If selected, fiscal_year/quarter aren't necessary. --configuration CONFIGURATION The name of the configuration class to load, e.g. "Development". If this isn't provided, the DJANGO_CONFIGURATION environment variable will be used. 
--version show program's version number and exit diff --git a/docs/Technical-Documentation/diagrams/tdp-environments.drawio b/docs/Technical-Documentation/diagrams/tdp-environments.drawio index 255c6061e..4449dff1a 100644 --- a/docs/Technical-Documentation/diagrams/tdp-environments.drawio +++ b/docs/Technical-Documentation/diagrams/tdp-environments.drawio @@ -1,6 +1,6 @@ - + - + @@ -14,7 +14,7 @@ - + @@ -212,11 +212,6 @@ - - - - - @@ -300,7 +295,7 @@ - + @@ -324,8 +319,16 @@ + + + + + + + + - + @@ -477,18 +480,16 @@ - + - + - + + - - - @@ -498,38 +499,39 @@ - + - + - - + + + + + + + - + + - + + - + - - - - - - @@ -625,6 +627,9 @@ + + + @@ -634,9 +639,6 @@ - - - @@ -669,10 +671,13 @@ - + - + + + + @@ -729,13 +734,47 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + diff --git a/docs/Technical-Documentation/diagrams/tdp-environments.drawio.png b/docs/Technical-Documentation/diagrams/tdp-environments.drawio.png index a56b1c516..757d7ec10 100644 Binary files a/docs/Technical-Documentation/diagrams/tdp-environments.drawio.png and b/docs/Technical-Documentation/diagrams/tdp-environments.drawio.png differ diff --git a/docs/User-Experience/Research-Syntheses/2024, Summer - OFA Admin Experience.md b/docs/User-Experience/Research-Syntheses/2024, Summer - OFA Admin Experience.md new file mode 100644 index 000000000..4f769a7ba --- /dev/null +++ b/docs/User-Experience/Research-Syntheses/2024, Summer - OFA Admin Experience.md @@ -0,0 +1,62 @@ +# 2024, Summer - OFA Admin Experience + +**Jump To:** + +* [Background](#background) +* [What we did & who we talked to](#what-we-did--who-we-talked-to) +* [What we learned](#what-we-learned) +* [Next steps](#next-steps) + +*** + +## Background + +The Django Admin Console (DAC) is an internal tool tailored for our OFA System Admin and DIGIT Team users. 
It allows for access to the Postgres database & system logs, managing user permissions, and has been increasingly adopted to provide the DIGIT team with quick insights into data file errors and STT submissions. + +*** + +## What we did & who we talked to + +We ran two workshops with the DIGIT team; which overlaps with our two OFA System Admins. While those teams overlap, note that the permissions for each user group differ. OFA System Admins have privileged access to DAC while DIGIT team users have non-privileged access. Our product manager and two developers were also in attendance for alignment and work estimation purposes. These workshops facilitated conversation around DAC enhancement requests & pain points provided by OFA in tickets [#2930](https://github.com/raft-tech/TANF-app/issues/2930), [#1662](https://github.com/raft-tech/TANF-app/issues/1662), [#960](https://github.com/raft-tech/TANF-app/issues/960), and [#2910](https://github.com/raft-tech/TANF-app/issues/2910) in order to achieve: + +* A clear understanding of current enhancement requests and described pain points +* Initial estimation of the work required to deliver each change to the DAC +* Alignment on the scope of potential work to support prioritizing all issues within our product roadmap and upcoming sprints + +*** + +## What we learned + +We identified and refined our understanding of Django Admin Console enhancements in the following categories: + +### Filtering & readability + +
EnhancementDescriptionDAC PageTicketRecommended Priority
Filter data files by relative dateAdds filter to the DAC Data Files page that filters by submission date and includes options for submissions yesterday, today, the past 7 days, the current month, and the current year. #3077 captures a higher lift enhancement to this use case.Data Files#30764.0 / P3
Default filter on DAC Data Files page to show only the most recent submissions per STT, fiscal period, and section. Currently DAC Search Indexes pages default to filtering results to "Newest". This ticket updates the language of that filter to "Most recent version" and adds that behavior to the Data Files page.Search Indexes, Data Files#30874.0 / P4
Add multiselect control to Search Indexes Fiscal Period FilterCurrently the filter control on DAC Search Indexes pages is a single option dropdown. This ticket replaces it with the multiselect control that we use when filtering by STT. This replicates the SQL union queries used in the legacy system.Search Indexes#31024.0 / P4
Add filters from DAC Data Files page to Data File Summaries pagesCurrently Data File Summaries pages lack filtering capability. This ticket delivers filter options matching those on the Data Files page.Data File Summaries#30934.0 / P2
[Spike] Investigate adding Change Message typesThis spike investigates whether we can supplement the current Change Message column on log entries with a change type to allow for filtering capability. Log Entries#30924.0 / P2
[Spike] Investigates how we can provide a tabular view of data file summariesCurrently data file summaries are served up in a raw JSON format. To make them easier to read we should investigate how we might map these data to a table view.Data File Summaries (Specific Summary view)#30954.0 / P2
Rearrange Data Files filters and implement multiselect fiscal period filterAdds the multiselect control for fiscal period filtering (as seen on current Search Index pages) on the Data Files page and rearranges filters into a more intuitive order.Data Files#30974.0 / P2
[Spike] Investigate YYYYMMDD value filtering for data filesHigher lift ideal solution following on from #3076. Investigates how we might add the ability to filter the DAC Data Files page to those submitted on a specific date or within a specific date range. Data Files#3077Beyond 4.0 / P2
List of Cat 1,4 rejected case numbersProvides a method of filtering current django outputs to produce a list of case numbers and months which are associated with category 1 and 4 errors. This provides DIGIT with a new analogue to the transmission reports of the legacy system.Data File Summaries (Specific Summary view)#3096Beyond 4.0 / P2
+ +### DAC actions & behavior + +
EnhancementDescriptionDAC PageTicketRecommended Priority
Read-only data file summaries Modifies the DAC Data File Summaries view to make it read-only to better correspond to how it's used by the DIGIT team.Data File Summaries (Specific summary view)#30944.0 / P2
User deletionCurrently user deletion is not supported via the DAC. This ticket delivers that capability while retaining all objects associated to deleted users.Users#3089Beyond 4.0 / P2
Mass actions on Users tableCurrently System Admins cannot select and deactivate multiple users at a time. This ticket also delivers a filter to restrict the Users table to only those who have been inactive for 180+ days.Users#3090Beyond 4.0 / P2
Add mailto: functionality to user email addresses in metadataCurrently in views of data file metadata, clicking on the user's email address links to that user's entry in the DAC Users page. This ticket delivers an update that changes it to a mailto: that will open in the device's default email application.Users (Specific user view)#3120Beyond 4.0 / P2
+ +### Bugs & system performance + +
EnhancementDescriptionDAC PageTicketRecommended Priority
[Bug] Misleading file status column on DAC data files pageThe file status column can return incorrect status values when viewed from the data files table rather than an individual data file's metadata page.Data Files#30684.0 / P2
[Spike] Investigate handling of custom filters During implementation of the first DAC multiselect filter we discovered problems with the handling of query strings which will pose scalability problems as we introduce new filters. N/A#31104.0 / P3
[Spike] Investigate latency when clicking into the parsing errors column on DAC data files pageCurrently when clicking into "Parser Errors" for a given row of the DAC Data Files page there is significant latency before the system returns results. Data Files#30754.0 / P3
+ +### Parsing + +
EnhancementDescriptionTicketRecommended Priority
Update Section 3, 4 validation to screen for ≥ 1 families rather than ≥ 0 Sections 3 and 4 of TANF data concern aggregate values that are highly unlikely to be 0. This ticket delivers a parsing logic fix to reflect that.#30884.0 / P3
+ +### User permissions + +
EnhancementDescriptionTicketRecommended Priority
TDP Data Files page permissions for DIGIT & Sys Admin user groupsCurrently users assigned to the DIGIT or System Admin user groups cannot reach and browse TDP's Data Files page. This ticket adds those permissions for both groups.#30744.0 / P4
+ +### Security Controls + +
EnhancementDescriptionTicketRecommended Priority
Auto-deactivation of usersUser deactivation is currently a manual process for system admins. This ticket delivers automation that will automatically deactivate users who have been inactive for 180 days.#25614.0 / P3
System owner notification upon assigned admin permissionsSince very few people should be granted System Admin permissions in production, the system owner should be notified whenever the role is assigned/unassigned.#13374.0 / P2
+ +*** + +## Next Steps + +Following this research, the design team will fully refine all the tickets referenced above and coordinate time with development and the DIGIT team to determine which enhancements will be tackled in release 4.0 and which will be deprioritized for a subsequent release. + +Additionally, the design team will prioritize [#3121](https://github.com/raft-tech/TANF-app/issues/3121) which delivers the email template that will be implemented by development in [#1337.](https://github.com/raft-tech/TANF-app/issues/1337) diff --git a/docs/User-Experience/Research-Syntheses/README.md b/docs/User-Experience/Research-Syntheses/README.md index 43496f75a..1602253be 100644 --- a/docs/User-Experience/Research-Syntheses/README.md +++ b/docs/User-Experience/Research-Syntheses/README.md @@ -5,6 +5,14 @@ With a few exceptions, we've tended to publish markdown research syntheses to su The syntheses included herein are organized reverse-chronologically from newest to oldest: +### [2024, Summer - OFA Admin Experience](https://github.com/raft-tech/TANF-app/blob/develop/docs/User-Experience/Research-Syntheses/2024,%20Summer%20-%20OFA%20Admin%20Experience.md) +- Ran two workshops with the OFA DIGIT team focused on enhancement requests for the Django Admin Console (DAC) to achieve: + - A clear understanding of current enhancement requests and described pain points + - Initial estimation of the work required to deliver each change to the DAC + - Alignment on the scope of potential work to support prioritizing all issues within our product roadmap and upcoming sprints + + + ### [2023, Sprint - TDP 3.0 Pilot Program](https://github.com/raft-tech/TANF-app/blob/develop/docs/User-Experience/Research-Syntheses/2023%2C%20Spring%20-%20Testing%20CSV%20%26%20Excel-based%20error%20reports.md#spring-2023---testing-csv--excel-based-error-reports) - Research sessions conducted with 5 states and 4 Tribes with a focus on programs that had errors on their Section 1 Data Files. 
diff --git a/product-updates/knowledge-center/index.html b/product-updates/knowledge-center/index.html index 8639f343f..a31984e6e 100644 --- a/product-updates/knowledge-center/index.html +++ b/product-updates/knowledge-center/index.html @@ -228,6 +228,7 @@
  • Frequently Asked Questions
  • + @@ -340,6 +341,96 @@

    Use an existing Login.gov AccountWhat's new in TDP

    +

    August 15th 2024 (v 3.5.2)

    +

    Added:

    +
      +
    • +
      + +
      +
      +
      + Email notifications for quarterly data deadlines +
      +

      If you have an active TDP account and have not yet submitted all of your program's quarterly files, you will now receive friendly e-mail reminders 5 days in advance of each quarterly deadline to help support timely submissions and feedback. Please note: If additional time is needed, please feel free to reach out to TANFData@acf.hhs.gov.

      +
      +
    • +
    + +

    Changed / Fixed:

    +
      + +
    • +
      + +
      +
      +
      + The Header update indicator errors no longer results in "Rejected" files +
      +

      We have relaxed TDP's data processing requirements to help ensure that the data in your files can still be processed even if these files have an invalid value for the update indicator. However, the TDP system expects the header update indicator to be set to "D" and if not, your error report will still include an error asking you to correct it.
      Read more about the header record and how to export complete data using fTANF.

      +
      +
    • + +
    • +
      + +
      +
      +
      + A bug related to Supplemental Security Income (SSI) receipt no longer results in "Rejected" files +
      +

      TDP was incorrectly generating the following error messages for some TANF and SSP Section 2: Closed Case files (for states and territories only):

      + T5 People in states must have a valid value for REC_SSI. +
      + M5 People in states must have a valid value for REC_SSI. + +

      This bug made it more likely for these files to be "Rejected". This has been resolved, and no changes to these files are needed. The TDP team will re-process the files at a later date. You may also resubmit any files impacted by this bug to obtain revised TDP-generated feedback reports, but this is not required.

      +
      +
    • + +
    • +
      + +
      +
      +
      + A bug related to submitting some data files for the first quarter of fiscal year 2021 has been resolved +
      +

      TDP was incorrectly generating the following error message on Section 3: Aggregate and Section 4: Stratum data files submitted for Q1, FY2021 (Oct - Dec, 2020):

      + Year 2020 must be larger than 2020. + +

      The issue has been corrected in TDP, and no changes to data are needed to resolve this.

      +
      +
    • + + + +
    • +
      + +
      +
      +
      + We're continuing to improve error messages that TDP generates +
      +

      User feedback from our TANF partners is invaluable to ensure that TDP-generated error reports are highly accurate, intuitive, and easy to act on.

      We are exploring better ways to structure the error report to help guide you toward the most important feedback to review. This will take some time, and we appreciate your patience and partnership in this process.

      +

      We're currently trying to learn more from our TANF partners about the circumstances that appear to indicate that families on the active caseload are receiving $0 in assistance across multiple assistance categories. This will yield the following message in your Active Case error reports:

      + T1: The sum of ('amount of food stamp assistance', 'amount of subsidized child care', 'cash amount', 'child care amount', 'transportation amount', 'transition services amount', and/or 'other amount') is not larger than 0. + +

      If you have encountered an error message like this in your error reports, we'd love to learn more from you. Please reach out to TANFData@acf.hhs.gov. You may also share feedback here on the Knowledge Center at your convenience. We use it to ensure that the TANF Data Portal is meeting your needs and better serve you and your team. All feedback is anonymous unless you provide your contact information.

      +
      +
    • + +
    + + +
    +

    June 5th 2024 (v 3.4.3)

    Added:

      @@ -400,9 +491,7 @@
      Improved the usability of TDP's request access form

      - First-time TDP users provide their first name, last name, and associated state, tribe, or territory. This information helps TDP administrators confirm system access. - We have updated the form to include a new field for the jurisdiction type (state, tribe, or territory). This change addresses questions from tribal program users about whether to select their tribal program name or the state where the program is located. - After selecting the jurisdiction type, the user can more quickly locate and select the appropriate jurisdiction from the dropdown menu. + First-time users to TDP provide their first name, last name, and associated state, tribe, or territory to help TDP administrators confirm access to the system. We've made revisions to the form by adding the jurisdiction type (state, tribe, or territory). After selecting the jurisdiction type, the user can more quickly locate and select the appropriate jurisdiction from the dropdown menu. This eliminates confusion about whether tribes need to select the name of their tribal program or the name of the state it's located in.

      diff --git a/product-updates/knowledge-center/viewing-error-reports.html b/product-updates/knowledge-center/viewing-error-reports.html index c8e458ad1..6024ff3bc 100644 --- a/product-updates/knowledge-center/viewing-error-reports.html +++ b/product-updates/knowledge-center/viewing-error-reports.html @@ -516,7 +516,7 @@

      Records

      -

      Header and Trailer Records

      +

      Header and Trailer Records

      The Header and Trailer refer to special records at the beginning and end of every data file. The Header communicates key information to TDP about the file's classification that helps the system correctly process it, including calendar year and quarter, program type, and section. The Trailer contains information about the number of records (excluding the header and trailer records) in the file.

      diff --git a/scripts/deploy-backend.sh b/scripts/deploy-backend.sh index 24bef90d9..ebbce8243 100755 --- a/scripts/deploy-backend.sh +++ b/scripts/deploy-backend.sh @@ -1,7 +1,7 @@ #!/bin/bash ############################## -# Global Variable Decls +# Global Variable Decls ############################## # The deployment strategy you wish to employ ( rolling update or setting up a new environment) @@ -77,7 +77,7 @@ set_cf_envs() else cf_cmd="cf set-env $CGAPPNAME_BACKEND $var_name ${!var_name}" fi - + echo "Setting var : $var_name" $cf_cmd done @@ -85,7 +85,7 @@ set_cf_envs() } # Helper method to generate JWT cert and keys for new environment -generate_jwt_cert() +generate_jwt_cert() { echo "regenerating JWT cert/key" yes 'XX' | openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes -sha256 @@ -94,7 +94,7 @@ generate_jwt_cert() } update_kibana() -{ +{ # Add network policy allowing Kibana to talk to the proxy and to allow the backend to talk to Kibana cf add-network-policy "$CGAPPNAME_BACKEND" "$CGAPPNAME_KIBANA" --protocol tcp --port 5601 cf add-network-policy "$CGAPPNAME_FRONTEND" "$CGAPPNAME_KIBANA" --protocol tcp --port 5601 @@ -105,12 +105,16 @@ update_backend() { cd tdrs-backend || exit cf unset-env "$CGAPPNAME_BACKEND" "AV_SCAN_URL" - + if [ "$CF_SPACE" = "tanf-prod" ]; then cf set-env "$CGAPPNAME_BACKEND" AV_SCAN_URL "http://tanf-prod-clamav-rest.apps.internal:9000/scan" else # Add environment varilables for clamav cf set-env "$CGAPPNAME_BACKEND" AV_SCAN_URL "http://tdp-clamav-nginx-$env.apps.internal:9000/scan" + + # Add variable for dev/staging apps to know their DB name. Prod uses default AWS name. 
+ cf unset-env "$CGAPPNAME_BACKEND" "APP_DB_NAME" + cf set-env "$CGAPPNAME_BACKEND" "APP_DB_NAME" "tdp_db_$backend_app_name" fi if [ "$1" = "rolling" ] ; then @@ -129,12 +133,12 @@ update_backend() fi set_cf_envs - + cf map-route "$CGAPPNAME_BACKEND" apps.internal --hostname "$CGAPPNAME_BACKEND" # Add network policy to allow frontend to access backend cf add-network-policy "$CGAPPNAME_FRONTEND" "$CGAPPNAME_BACKEND" --protocol tcp --port 8080 - + if [ "$CF_SPACE" = "tanf-prod" ]; then # Add network policy to allow backend to access tanf-prod services cf add-network-policy "$CGAPPNAME_BACKEND" clamav-rest --protocol tcp --port 9000 @@ -149,7 +153,7 @@ bind_backend_to_services() { echo "Binding services to app: $CGAPPNAME_BACKEND" if [ "$CGAPPNAME_BACKEND" = "tdp-backend-develop" ]; then - # TODO: this is technical debt, we should either make staging mimic tanf-dev + # TODO: this is technical debt, we should either make staging mimic tanf-dev # or make unique services for all apps but we have a services limit # Introducing technical debt for release 3.0.0 specifically. env="develop" @@ -158,10 +162,10 @@ bind_backend_to_services() { cf bind-service "$CGAPPNAME_BACKEND" "tdp-staticfiles-${env}" cf bind-service "$CGAPPNAME_BACKEND" "tdp-datafiles-${env}" cf bind-service "$CGAPPNAME_BACKEND" "tdp-db-${env}" - + # Setting up the ElasticSearch service cf bind-service "$CGAPPNAME_BACKEND" "es-${env}" - + set_cf_envs echo "Restarting app: $CGAPPNAME_BACKEND" diff --git a/sentry/.env b/sentry/.env new file mode 100644 index 000000000..a3d4f1b11 --- /dev/null +++ b/sentry/.env @@ -0,0 +1,35 @@ +COMPOSE_PROJECT_NAME=sentry-self-hosted +COMPOSE_PROFILES=feature-complete +SENTRY_EVENT_RETENTION_DAYS=90 +# You can either use a port number or an IP:PORT combo for SENTRY_BIND +# See https://docs.docker.com/compose/compose-file/#ports for more +SENTRY_BIND=9000 +# Set SENTRY_MAIL_HOST to a valid FQDN (host/domain name) to be able to send emails! 
+# SENTRY_MAIL_HOST=example.com + + +# https://hub.docker.com/r/getsentry/sentry/tags?page=1205&page_size=&ordering=&name= +SENTRY_IMAGE=getsentry/sentry:23.10.1 + + +# https://hub.docker.com/r/getsentry/snuba/tags?page=105&page_size=&name=&ordering= +SNUBA_IMAGE=getsentry/snuba:23.10.1 + +# https://hub.docker.com/r/getsentry/relay/tags?page=100&page_size=&name=&ordering= +RELAY_IMAGE=getsentry/relay:23.10.1 + + +#https://hub.docker.com/r/getsentry/symbolicator/tags?page=15&page_size=&name=&ordering= +SYMBOLICATOR_IMAGE=getsentry/symbolicator:23.10.1 + +# https://hub.docker.com/r/getsentry/vroom/tags?page=15&page_size=&name=&ordering= +VROOM_IMAGE=getsentry/vroom:23.10.1 + + +WAL2JSON_VERSION=latest +HEALTHCHECK_INTERVAL=30s +HEALTHCHECK_TIMEOUT=1m30s +HEALTHCHECK_RETRIES=10 +# Caution: Raising max connections of postgres increases CPU and RAM usage +# see https://github.com/getsentry/self-hosted/pull/2740 for more information +POSTGRES_MAX_CONNECTIONS=100 \ No newline at end of file diff --git a/sentry/backup.json b/sentry/backup.json new file mode 100644 index 000000000..e83f7b874 --- /dev/null +++ b/sentry/backup.json @@ -0,0 +1,503 @@ +[ +{ + "model": "sites.site", + "pk": 1, + "fields": { + "domain": "example.com", + "name": "example.com" + } +}, +{ + "model": "sentry.option", + "pk": 1, + "fields": { + "key": "sentry:last_worker_ping", + "last_updated": "2024-08-01T13:53:00.189Z", + "last_updated_by": "unknown", + "value": 1722520380.1114867 + } +}, +{ + "model": "sentry.option", + "pk": 2, + "fields": { + "key": "sentry:last_worker_version", + "last_updated": "2024-08-01T13:53:00.238Z", + "last_updated_by": "unknown", + "value": "\"23.7.0.dev0\"" + } +}, +{ + "model": "sentry.option", + "pk": 3, + "fields": { + "key": "system.url-prefix", + "last_updated": "2024-08-01T13:50:36.841Z", + "last_updated_by": "unknown", + "value": "\"http://localhost:9001\"" + } +}, +{ + "model": "sentry.option", + "pk": 4, + "fields": { + "key": "system.admin-email", + 
"last_updated": "2024-08-01T13:50:36.854Z", + "last_updated_by": "unknown", + "value": "\"admin@tanf.com\"" + } +}, +{ + "model": "sentry.option", + "pk": 5, + "fields": { + "key": "mail.port", + "last_updated": "2024-08-01T13:50:36.860Z", + "last_updated_by": "unknown", + "value": 25 + } +}, +{ + "model": "sentry.option", + "pk": 6, + "fields": { + "key": "mail.username", + "last_updated": "2024-08-01T13:50:36.866Z", + "last_updated_by": "unknown", + "value": "\"\"" + } +}, +{ + "model": "sentry.option", + "pk": 7, + "fields": { + "key": "mail.password", + "last_updated": "2024-08-01T13:50:36.870Z", + "last_updated_by": "unknown", + "value": "\"\"" + } +}, +{ + "model": "sentry.option", + "pk": 8, + "fields": { + "key": "mail.use-tls", + "last_updated": "2024-08-01T13:50:36.873Z", + "last_updated_by": "unknown", + "value": false + } +}, +{ + "model": "sentry.option", + "pk": 9, + "fields": { + "key": "mail.use-ssl", + "last_updated": "2024-08-01T13:50:36.876Z", + "last_updated_by": "unknown", + "value": false + } +}, +{ + "model": "sentry.option", + "pk": 10, + "fields": { + "key": "auth.allow-registration", + "last_updated": "2024-08-01T13:50:36.883Z", + "last_updated_by": "unknown", + "value": false + } +}, +{ + "model": "sentry.option", + "pk": 11, + "fields": { + "key": "sentry:version-configured", + "last_updated": "2024-08-01T13:50:36.889Z", + "last_updated_by": "unknown", + "value": "\"23.7.0.dev0.dd25c26bcece07936bb6401f6fa9c89b96a2118e\"" + } +}, +{ + "model": "sentry.actor", + "pk": 1, + "fields": { + "type": 0, + "user_id": null, + "team": 1 + } +}, +{ + "model": "sentry.actor", + "pk": 2, + "fields": { + "type": 1, + "user_id": 1, + "team": null + } +}, +{ + "model": "sentry.email", + "pk": 1, + "fields": { + "email": "admin@tanf.com", + "date_added": "2024-08-01T13:46:16.066Z" + } +}, +{ + "model": "sentry.organization", + "pk": 1, + "fields": { + "name": "Sentry", + "slug": "sentry", + "status": 0, + "date_added": "2024-08-01T13:44:41.175Z", + 
"default_role": "member", + "is_test": false, + "flags": "1" + } +}, +{ + "model": "sentry.user", + "pk": 1, + "fields": { + "password": "pbkdf2_sha256$150000$hhBadj48lYdN$XnnczKcFZPnOXsw6KLgbOdg+9Ff8oIFCzKBFuLPh7M4=", + "last_login": "2024-08-01T13:50:33.020Z", + "username": "admin@tanf.com", + "name": "", + "email": "admin@tanf.com", + "is_staff": true, + "is_active": true, + "is_superuser": true, + "is_managed": false, + "is_sentry_app": null, + "is_password_expired": false, + "last_password_change": "2024-08-01T13:46:16.183Z", + "flags": "0", + "session_nonce": null, + "date_joined": "2024-08-01T13:46:16.058Z", + "last_active": "2024-08-01T13:51:16.376Z", + "avatar_type": 0, + "avatar_url": null + } +}, +{ + "model": "sentry.organizationmapping", + "pk": 1, + "fields": { + "organization_id": 1, + "slug": "sentry", + "name": "Sentry", + "date_created": "2024-08-01T13:44:41.239Z", + "customer_id": null, + "verified": false, + "idempotency_key": "", + "region_name": "--monolith--", + "status": 0 + } +}, +{ + "model": "sentry.relayusage", + "pk": 1, + "fields": { + "relay_id": "6d26be62-e8e3-4604-a148-656227d9769f", + "version": "23.6.1", + "first_seen": "2024-08-01T13:48:32.374Z", + "last_seen": "2024-08-01T13:48:32.374Z", + "public_key": "VGxPbAyvOjbRdVdaIF8PmuCq-0YCjRqT9Q0dKhxYg_A" + } +}, +{ + "model": "sentry.relay", + "pk": 1, + "fields": { + "relay_id": "6d26be62-e8e3-4604-a148-656227d9769f", + "public_key": "VGxPbAyvOjbRdVdaIF8PmuCq-0YCjRqT9Q0dKhxYg_A", + "first_seen": null, + "last_seen": null, + "is_internal": true + } +}, +{ + "model": "sentry.useremail", + "pk": 1, + "fields": { + "user": [ + "admin@tanf.com" + ], + "email": "admin@tanf.com", + "validation_hash": "PFGHXGxhV2oGjQI9tZDOLjx6Q1qZWtKN", + "date_hash_added": "2024-08-01T13:46:16.064Z", + "is_verified": false + } +}, +{ + "model": "sentry.userip", + "pk": 1, + "fields": { + "user": [ + "admin@tanf.com" + ], + "ip_address": "192.168.65.1", + "country_code": null, + "region_code": null, + 
"first_seen": "2024-08-01T13:49:08.204Z", + "last_seen": "2024-08-01T13:49:08.193Z" + } +}, +{ + "model": "sentry.userrole", + "pk": 1, + "fields": { + "date_updated": "2024-08-01T13:44:41.170Z", + "date_added": "2024-08-01T13:44:41.170Z", + "name": "Super Admin", + "permissions": "['broadcasts.admin', 'users.admin', 'options.admin']" + } +}, +{ + "model": "sentry.userroleuser", + "pk": 1, + "fields": { + "date_updated": "2024-08-01T13:46:16.196Z", + "date_added": "2024-08-01T13:46:16.196Z", + "user": [ + "admin@tanf.com" + ], + "role": 1 + } +}, +{ + "model": "sentry.team", + "pk": 1, + "fields": { + "organization": 1, + "slug": "sentry", + "name": "Sentry", + "status": 0, + "actor": 1, + "idp_provisioned": false, + "date_added": "2024-08-01T13:44:41.185Z", + "org_role": null + } +}, +{ + "model": "sentry.organizationmember", + "pk": 1, + "fields": { + "organization": 1, + "user_id": 1, + "email": null, + "role": "owner", + "flags": "0", + "token": null, + "date_added": "2024-08-01T13:46:16.073Z", + "token_expires_at": null, + "has_global_access": true, + "inviter_id": null, + "invite_status": 0, + "type": 50, + "user_is_active": true, + "user_email": "admin@tanf.com" + } +}, +{ + "model": "sentry.project", + "pk": 1, + "fields": { + "slug": "internal", + "name": "Internal", + "forced_color": null, + "organization": 1, + "public": false, + "date_added": "2024-08-01T13:44:41.191Z", + "status": 0, + "first_event": null, + "flags": "10", + "platform": null + } +}, +{ + "model": "sentry.project", + "pk": 2, + "fields": { + "slug": "python-django", + "name": "python-django", + "forced_color": null, + "organization": 1, + "public": false, + "date_added": "2024-08-01T13:50:58.893Z", + "status": 0, + "first_event": null, + "flags": "10", + "platform": "python-django" + } +}, +{ + "model": "sentry.projectkey", + "pk": 1, + "fields": { + "project": 1, + "label": "Default", + "public_key": "20835f66e30e4e19ac9c98c83bbd951f", + "secret_key": 
"50b61843dabe4b78886b6817421dc6a1", + "roles": "1", + "status": 0, + "date_added": "2024-08-01T13:44:41.205Z", + "rate_limit_count": null, + "rate_limit_window": null, + "data": { + "dynamicSdkLoaderOptions": { + "hasPerformance": true, + "hasReplay": true + } + } + } +}, +{ + "model": "sentry.projectkey", + "pk": 2, + "fields": { + "project": 2, + "label": "Default", + "public_key": "43ebf8abe1434ec6aea2c7b92c465a0e", + "secret_key": "c62d7709665848f88bbe09082e019f75", + "roles": "1", + "status": 0, + "date_added": "2024-08-01T13:50:59.103Z", + "rate_limit_count": null, + "rate_limit_window": null, + "data": { + "dynamicSdkLoaderOptions": { + "hasPerformance": true, + "hasReplay": true + } + } + } +}, +{ + "model": "sentry.rule", + "pk": 1, + "fields": { + "project": 1, + "environment_id": null, + "label": "Send a notification for new issues", + "data": "{\"match\":\"all\",\"conditions\":[{\"id\":\"sentry.rules.conditions.first_seen_event.FirstSeenEventCondition\"}],\"actions\":[{\"id\":\"sentry.mail.actions.NotifyEmailAction\",\"targetType\":\"IssueOwners\",\"targetIdentifier\":null,\"fallthroughType\":\"ActiveMembers\"}]}", + "status": 0, + "source": 0, + "owner": null, + "date_added": "2024-08-01T13:44:41.213Z" + } +}, +{ + "model": "sentry.rule", + "pk": 2, + "fields": { + "project": 2, + "environment_id": null, + "label": "Send a notification for new issues", + "data": "{\"match\":\"all\",\"conditions\":[{\"id\":\"sentry.rules.conditions.first_seen_event.FirstSeenEventCondition\"}],\"actions\":[{\"id\":\"sentry.mail.actions.NotifyEmailAction\",\"targetType\":\"IssueOwners\",\"targetIdentifier\":null,\"fallthroughType\":\"ActiveMembers\"}]}", + "status": 0, + "source": 0, + "owner": null, + "date_added": "2024-08-01T13:50:59.204Z" + } +}, +{ + "model": "sentry.projectteam", + "pk": 1, + "fields": { + "project": 1, + "team": 1 + } +}, +{ + "model": "sentry.projectteam", + "pk": 2, + "fields": { + "project": 2, + "team": 1 + } +}, +{ + "model": 
"sentry.organizationmemberteam", + "pk": 1, + "fields": { + "team": 1, + "organizationmember": 1, + "is_active": true, + "role": null + } +}, +{ + "model": "sentry.projectoption", + "pk": 1, + "fields": { + "project": 1, + "key": "sentry:relay-rev", + "value": "\"124b064568394513a93c1cf6b96fa531\"" + } +}, +{ + "model": "sentry.projectoption", + "pk": 2, + "fields": { + "project": 1, + "key": "sentry:relay-rev-lastchange", + "value": "\"2024-08-01T13:44:41.228498Z\"" + } +}, +{ + "model": "sentry.projectoption", + "pk": 3, + "fields": { + "project": 1, + "key": "sentry:option-epoch", + "value": 11 + } +}, +{ + "model": "sentry.projectoption", + "pk": 4, + "fields": { + "project": 1, + "key": "sentry:origins", + "value": "[\"*\"]" + } +}, +{ + "model": "sentry.projectoption", + "pk": 5, + "fields": { + "project": 2, + "key": "sentry:relay-rev", + "value": "\"c588a54b4537446c8ca91477867aeddd\"" + } +}, +{ + "model": "sentry.projectoption", + "pk": 6, + "fields": { + "project": 2, + "key": "sentry:relay-rev-lastchange", + "value": "\"2024-08-01T13:51:01.098125Z\"" + } +}, +{ + "model": "sentry.projectoption", + "pk": 7, + "fields": { + "project": 2, + "key": "sentry:option-epoch", + "value": 11 + } +}, +{ + "model": "sentry.projectoption", + "pk": 8, + "fields": { + "project": 2, + "key": "sentry:token", + "value": "\"164d36a4500d11ef938f0242ac130024\"" + } +} +] diff --git a/sentry/docker-compose.yml b/sentry/docker-compose.yml new file mode 100644 index 000000000..86ecb3615 --- /dev/null +++ b/sentry/docker-compose.yml @@ -0,0 +1,496 @@ +x-restart-policy: &restart_policy + restart: unless-stopped +x-depends_on-healthy: &depends_on-healthy + condition: service_healthy +x-depends_on-default: &depends_on-default + condition: service_started +x-healthcheck-defaults: &healthcheck_defaults + # Avoid setting the interval too small, as docker uses much more CPU than one would expect. 
+ # Related issues: + # https://github.com/moby/moby/issues/39102 + # https://github.com/moby/moby/issues/39388 + # https://github.com/getsentry/self-hosted/issues/1000 + interval: "$HEALTHCHECK_INTERVAL" + timeout: "$HEALTHCHECK_TIMEOUT" + retries: $HEALTHCHECK_RETRIES + start_period: 10s +x-sentry-defaults: &sentry_defaults + <<: *restart_policy + image: sentry-self-hosted-local + # Set the platform to build for linux/arm64 when needed on Apple silicon Macs. + platform: ${DOCKER_PLATFORM:-} + build: + context: ./sentry + args: + - SENTRY_IMAGE + depends_on: + redis: + <<: *depends_on-healthy + kafka: + <<: *depends_on-healthy + postgres: + <<: *depends_on-healthy + memcached: + <<: *depends_on-default + smtp: + <<: *depends_on-default + snuba-api: + <<: *depends_on-default + snuba-consumer: + <<: *depends_on-default + snuba-outcomes-consumer: + <<: *depends_on-default + snuba-transactions-consumer: + <<: *depends_on-default + snuba-subscription-consumer-events: + <<: *depends_on-default + snuba-subscription-consumer-transactions: + <<: *depends_on-default + snuba-replacer: + <<: *depends_on-default + symbolicator: + <<: *depends_on-default + vroom: + <<: *depends_on-default + entrypoint: "/etc/sentry/entrypoint.sh" + command: ["run", "web"] + environment: + PYTHONUSERBASE: "/data/custom-packages" + SENTRY_CONF: "/etc/sentry" + SNUBA: "http://snuba-api:1218" + VROOM: "http://vroom:8085" + # Force everything to use the system CA bundle + # This is mostly needed to support installing custom CA certs + # This one is used by botocore + DEFAULT_CA_BUNDLE: &ca_bundle "/etc/ssl/certs/ca-certificates.crt" + # This one is used by requests + REQUESTS_CA_BUNDLE: *ca_bundle + # This one is used by grpc/google modules + GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR: *ca_bundle + # Leaving the value empty to just pass whatever is set + # on the host system (or in the .env file) + SENTRY_EVENT_RETENTION_DAYS: + SENTRY_MAIL_HOST: + SENTRY_MAX_EXTERNAL_SOURCEMAP_SIZE: + # Set this 
value if you plan on using the Suggested Fix Feature + OPENAI_API_KEY: + volumes: + - "sentry-data:/data" + - "./sentry:/etc/sentry" + - "./geoip:/geoip:ro" + - "./certificates:/usr/local/share/ca-certificates:ro" +x-snuba-defaults: &snuba_defaults + <<: *restart_policy + depends_on: + clickhouse: + <<: *depends_on-healthy + kafka: + <<: *depends_on-healthy + redis: + <<: *depends_on-healthy + image: "$SNUBA_IMAGE" + environment: + SNUBA_SETTINGS: self_hosted + CLICKHOUSE_HOST: clickhouse + DEFAULT_BROKERS: "kafka:9092" + REDIS_HOST: redis + UWSGI_MAX_REQUESTS: "10000" + UWSGI_DISABLE_LOGGING: "true" + # Leaving the value empty to just pass whatever is set + # on the host system (or in the .env file) + SENTRY_EVENT_RETENTION_DAYS: +services: + smtp: + <<: *restart_policy + platform: linux/amd64 + image: tianon/exim4 + hostname: "${SENTRY_MAIL_HOST:-}" + volumes: + - "sentry-smtp:/var/spool/exim4" + - "sentry-smtp-log:/var/log/exim4" + memcached: + <<: *restart_policy + image: "memcached:1.6.21-alpine" + command: ["-I", "${SENTRY_MAX_EXTERNAL_SOURCEMAP_SIZE:-1M}"] + healthcheck: + <<: *healthcheck_defaults + # From: https://stackoverflow.com/a/31877626/5155484 + test: echo stats | nc 127.0.0.1 11211 + redis: + <<: *restart_policy + image: "redis:6.2.13-alpine" + healthcheck: + <<: *healthcheck_defaults + test: redis-cli ping + volumes: + - "sentry-redis:/data" + ulimits: + nofile: + soft: 10032 + hard: 10032 + postgres: + <<: *restart_policy + # Using the same postgres version as Sentry dev for consistency purposes + image: "postgres:14.5" + healthcheck: + <<: *healthcheck_defaults + # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"] + command: + [ + "postgres", + "-c", + "wal_level=logical", + "-c", + "max_replication_slots=1", + "-c", + "max_wal_senders=1", + ] + environment: + POSTGRES_HOST_AUTH_METHOD: "trust" + entrypoint: 
/opt/sentry/postgres-entrypoint.sh + volumes: + - "sentry-postgres:/var/lib/postgresql/data" + - type: bind + read_only: true + source: ./postgres/ + target: /opt/sentry/ + zookeeper: + <<: *restart_policy + image: "confluentinc/cp-zookeeper:5.5.7" + environment: + ZOOKEEPER_CLIENT_PORT: "2181" + CONFLUENT_SUPPORT_METRICS_ENABLE: "false" + ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN" + ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN" + KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok" + ulimits: + nofile: + soft: 4096 + hard: 4096 + volumes: + - "sentry-zookeeper:/var/lib/zookeeper/data" + - "sentry-zookeeper-log:/var/lib/zookeeper/log" + - "sentry-secrets:/etc/zookeeper/secrets" + healthcheck: + <<: *healthcheck_defaults + test: + ["CMD-SHELL", 'echo "ruok" | nc -w 2 localhost 2181 | grep imok'] + kafka: + <<: *restart_policy + depends_on: + zookeeper: + <<: *depends_on-healthy + image: "confluentinc/cp-kafka:5.5.7" + environment: + KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181" + KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1" + KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1" + KAFKA_LOG_RETENTION_HOURS: "24" + KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust + KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too + KAFKA_HEAP_OPTS: "-Xmx500M -Xms500M" + CONFLUENT_SUPPORT_METRICS_ENABLE: "false" + KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN" + KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN" + KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN" + ulimits: + nofile: + soft: 4096 + hard: 4096 + volumes: + - "sentry-kafka:/var/lib/kafka/data" + - "sentry-kafka-log:/var/lib/kafka/log" + - "sentry-secrets:/etc/kafka/secrets" + healthcheck: + <<: *healthcheck_defaults + test: ["CMD-SHELL", "/usr/bin/kafka-topics --bootstrap-server kafka:9092 --list"] + interval: 10s + timeout: 10s + retries: 30 + clickhouse: + <<: *restart_policy + image: 
clickhouse-self-hosted-local + build: + context: ./clickhouse + args: + BASE_IMAGE: "${CLICKHOUSE_IMAGE:-}" + ulimits: + nofile: + soft: 262144 + hard: 262144 + volumes: + - "sentry-clickhouse:/var/lib/clickhouse" + - "sentry-clickhouse-log:/var/log/clickhouse-server" + - type: bind + read_only: true + source: ./clickhouse/config.xml + target: /etc/clickhouse-server/config.d/sentry.xml + environment: + # This limits Clickhouse's memory to 30% of the host memory + # If you have high volume and your search return incomplete results + # You might want to change this to a higher value (and ensure your host has enough memory) + MAX_MEMORY_USAGE_RATIO: 0.3 + healthcheck: + test: [ + "CMD-SHELL", + # Manually override any http_proxy envvar that might be set, because + # this wget does not support no_proxy. See: + # https://github.com/getsentry/self-hosted/issues/1537 + "http_proxy='' wget -nv -t1 --spider 'http://localhost:8123/' || exit 1", + ] + interval: 10s + timeout: 10s + retries: 30 + geoipupdate: + image: "ghcr.io/maxmind/geoipupdate:v6.0.0" + # Override the entrypoint in order to avoid using envvars for config. + # Futz with settings so we can keep mmdb and conf in same dir on host + # (image looks for them in separate dirs by default). 
+ entrypoint: ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"] + volumes: + - "./geoip:/sentry" + snuba-api: + <<: *snuba_defaults + # Kafka consumer responsible for feeding events into Clickhouse + snuba-consumer: + <<: *snuba_defaults + command: consumer --storage errors --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + # Kafka consumer responsible for feeding outcomes into Clickhouse + # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data + # since we did not do a proper migration + snuba-outcomes-consumer: + <<: *snuba_defaults + command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750 --no-strict-offset-reset + # Kafka consumer responsible for feeding transactions data into Clickhouse + snuba-transactions-consumer: + <<: *snuba_defaults + command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-replays-consumer: + <<: *snuba_defaults + command: consumer --storage replays --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-issue-occurrence-consumer: + <<: *snuba_defaults + command: consumer --storage search_issues --consumer-group generic_events_group --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-metrics-consumer: + <<: *snuba_defaults + command: consumer --storage metrics_raw --consumer-group snuba-metrics-consumers --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-generic-metrics-distributions-consumer: + <<: *snuba_defaults + command: consumer --storage generic_metrics_distributions_raw --consumer-group snuba-gen-metrics-distributions-consumers --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-generic-metrics-sets-consumer: + <<: *snuba_defaults + command: consumer --storage generic_metrics_sets_raw --consumer-group 
snuba-gen-metrics-sets-consumers --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-generic-metrics-counters-consumer: + <<: *snuba_defaults + command: consumer --storage generic_metrics_counters_raw --consumer-group snuba-gen-metrics-counters-consumers --auto-offset-reset=latest --max-batch-time-ms 750 --no-strict-offset-reset + snuba-replacer: + <<: *snuba_defaults + command: replacer --storage errors --auto-offset-reset=latest --no-strict-offset-reset + snuba-subscription-consumer-events: + <<: *snuba_defaults + command: subscriptions-scheduler-executor --dataset events --entity events --auto-offset-reset=latest --no-strict-offset-reset --consumer-group=snuba-events-subscriptions-consumers --followed-consumer-group=snuba-consumers --schedule-ttl=60 --stale-threshold-seconds=900 + snuba-subscription-consumer-transactions: + <<: *snuba_defaults + command: subscriptions-scheduler-executor --dataset transactions --entity transactions --auto-offset-reset=latest --no-strict-offset-reset --consumer-group=snuba-transactions-subscriptions-consumers --followed-consumer-group=transactions_group --schedule-ttl=60 --stale-threshold-seconds=900 + snuba-subscription-consumer-metrics: + <<: *snuba_defaults + command: subscriptions-scheduler-executor --dataset metrics --entity metrics_sets --entity metrics_counters --auto-offset-reset=latest --no-strict-offset-reset --consumer-group=snuba-metrics-subscriptions-consumers --followed-consumer-group=snuba-metrics-consumers --schedule-ttl=60 --stale-threshold-seconds=900 + snuba-profiling-profiles-consumer: + <<: *snuba_defaults + command: consumer --storage profiles --auto-offset-reset=latest --max-batch-time-ms 1000 --no-strict-offset-reset + snuba-profiling-functions-consumer: + <<: *snuba_defaults + command: consumer --storage functions_raw --auto-offset-reset=latest --max-batch-time-ms 1000 --no-strict-offset-reset + symbolicator: + <<: *restart_policy + image: "$SYMBOLICATOR_IMAGE" + volumes: + 
- "sentry-symbolicator:/data" + - type: bind + read_only: true + source: ./symbolicator + target: /etc/symbolicator + command: run -c /etc/symbolicator/config.yml + symbolicator-cleanup: + <<: *restart_policy + image: symbolicator-cleanup-self-hosted-local + build: + context: ./cron + args: + BASE_IMAGE: "$SYMBOLICATOR_IMAGE" + command: '"55 23 * * * gosu symbolicator symbolicator cleanup"' + volumes: + - "sentry-symbolicator:/data" + web: + <<: *sentry_defaults + ulimits: + nofile: + soft: 4096 + hard: 4096 + healthcheck: + <<: *healthcheck_defaults + test: + - "CMD" + - "/bin/bash" + - "-c" + # Courtesy of https://unix.stackexchange.com/a/234089/108960 + - 'exec 3<>/dev/tcp/127.0.0.1/9000 && echo -e "GET /_health/ HTTP/1.1\r\nhost: 127.0.0.1\r\n\r\n" >&3 && grep ok -s -m 1 <&3' + cron: + <<: *sentry_defaults + command: run cron + worker: + <<: *sentry_defaults + command: run worker + events-consumer: + <<: *sentry_defaults + command: run consumer ingest-events --consumer-group ingest-consumer + attachments-consumer: + <<: *sentry_defaults + command: run consumer ingest-attachments --consumer-group ingest-consumer + transactions-consumer: + <<: *sentry_defaults + command: run consumer ingest-transactions --consumer-group ingest-consumer + metrics-consumer: + <<: *sentry_defaults + command: run consumer ingest-metrics --consumer-group metrics-consumer + generic-metrics-consumer: + <<: *sentry_defaults + command: run consumer ingest-generic-metrics --consumer-group generic-metrics-consumer + billing-metrics-consumer: + <<: *sentry_defaults + command: run consumer billing-metrics-consumer --consumer-group billing-metrics-consumer + ingest-replay-recordings: + <<: *sentry_defaults + command: run consumer ingest-replay-recordings --consumer-group ingest-replay-recordings + ingest-occurrences: + <<: *sentry_defaults + command: run consumer ingest-occurrences --consumer-group ingest-occurrences + ingest-profiles: + <<: *sentry_defaults + command: run consumer 
--no-strict-offset-reset ingest-profiles --consumer-group ingest-profiles + ingest-monitors: + <<: *sentry_defaults + command: run consumer --no-strict-offset-reset ingest-monitors --consumer-group ingest-monitors + post-process-forwarder-errors: + <<: *sentry_defaults + command: run consumer post-process-forwarder-errors --consumer-group post-process-forwarder --synchronize-commit-log-topic=snuba-commit-log --synchronize-commit-group=snuba-consumers + post-process-forwarder-transactions: + <<: *sentry_defaults + command: run consumer post-process-forwarder-transactions --consumer-group post-process-forwarder --synchronize-commit-log-topic=snuba-transactions-commit-log --synchronize-commit-group transactions_group + post-process-forwarder-issue-platform: + <<: *sentry_defaults + command: run consumer post-process-forwarder-issue-platform --consumer-group post-process-forwarder --synchronize-commit-log-topic=snuba-generic-events-commit-log --synchronize-commit-group generic_events_group + subscription-consumer-events: + <<: *sentry_defaults + command: run consumer events-subscription-results --consumer-group query-subscription-consumer + subscription-consumer-transactions: + <<: *sentry_defaults + command: run consumer transactions-subscription-results --consumer-group query-subscription-consumer + subscription-consumer-metrics: + <<: *sentry_defaults + command: run consumer metrics-subscription-results --consumer-group query-subscription-consumer + subscription-consumer-generic-metrics: + <<: *sentry_defaults + command: run consumer generic-metrics-subscription-results --consumer-group query-subscription-consumer + sentry-cleanup: + <<: *sentry_defaults + image: sentry-cleanup-self-hosted-local + build: + context: ./cron + args: + BASE_IMAGE: sentry-self-hosted-local + entrypoint: "/entrypoint.sh" + command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"' + nginx: + <<: *restart_policy + ports: + - "$SENTRY_BIND:80/tcp" + image: 
"nginx:1.25.2-alpine" + volumes: + - type: bind + read_only: true + source: ./nginx + target: /etc/nginx + - sentry-nginx-cache:/var/cache/nginx + depends_on: + - web + - relay + relay: + <<: *restart_policy + image: "$RELAY_IMAGE" + volumes: + - type: bind + read_only: true + source: ./relay + target: /work/.relay + - type: bind + read_only: true + source: ./geoip + target: /geoip + depends_on: + kafka: + <<: *depends_on-healthy + redis: + <<: *depends_on-healthy + web: + <<: *depends_on-healthy + vroom: + <<: *restart_policy + image: "$VROOM_IMAGE" + environment: + SENTRY_KAFKA_BROKERS_PROFILING: "kafka:9092" + SENTRY_KAFKA_BROKERS_OCCURRENCES: "kafka:9092" + SENTRY_BUCKET_PROFILES: file://localhost//var/lib/sentry-profiles + SENTRY_SNUBA_HOST: "http://snuba-api:1218" + volumes: + - sentry-vroom:/var/lib/sentry-profiles + depends_on: + kafka: + <<: *depends_on-healthy + vroom-cleanup: + <<: *restart_policy + image: vroom-cleanup-self-hosted-local + build: + context: ./cron + args: + BASE_IMAGE: "$VROOM_IMAGE" + entrypoint: "/entrypoint.sh" + environment: + # Leaving the value empty to just pass whatever is set + # on the host system (or in the .env file) + SENTRY_EVENT_RETENTION_DAYS: + command: '"0 0 * * * find /var/lib/sentry-profiles -type f -mtime +$SENTRY_EVENT_RETENTION_DAYS -delete"' + volumes: + - sentry-vroom:/var/lib/sentry-profiles + +volumes: + # These store application data that should persist across restarts. + sentry-data: + external: true + sentry-postgres: + external: true + sentry-redis: + external: true + sentry-zookeeper: + external: true + sentry-kafka: + external: true + sentry-clickhouse: + external: true + sentry-symbolicator: + external: true + # This volume stores profiles and should be persisted. + # Not being external will still persist data across restarts. + # It won't persist if someone does a docker compose down -v. + sentry-vroom: + # These store ephemeral data that needn't persist across restarts. 
+ # That said, volumes will be persisted across restarts until they are deleted. + sentry-secrets: + sentry-smtp: + sentry-nginx-cache: + sentry-zookeeper-log: + sentry-kafka-log: + sentry-smtp-log: + sentry-clickhouse-log: diff --git a/tdrs-backend/Dockerfile b/tdrs-backend/Dockerfile index f09622854..e8233528d 100644 --- a/tdrs-backend/Dockerfile +++ b/tdrs-backend/Dockerfile @@ -15,12 +15,12 @@ RUN apt-get -y update # Upgrade already installed packages: RUN apt-get -y upgrade # Postgres client setup -RUN apt install -y postgresql-common curl ca-certificates && install -d /usr/share/postgresql-common/pgdg && \ +RUN apt --purge remove postgresql postgresql-* && apt install -y postgresql-common curl ca-certificates && install -d /usr/share/postgresql-common/pgdg && \ curl -o /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc --fail https://www.postgresql.org/media/keys/ACCC4CF8.asc && \ sh -c 'echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' && \ apt -y update && apt install postgresql-client-15 -y # Install packages: -RUN apt install -y gcc graphviz graphviz-dev libpq-dev python3-dev +RUN apt install -y gcc graphviz graphviz-dev libpq-dev python3-dev vim # Install pipenv RUN pip install --upgrade pip pipenv RUN pipenv install --dev --system --deploy diff --git a/tdrs-backend/Pipfile b/tdrs-backend/Pipfile index 51a998b7e..a1defabdb 100644 --- a/tdrs-backend/Pipfile +++ b/tdrs-backend/Pipfile @@ -26,13 +26,12 @@ boto3 = "==1.28.4" cryptography = "==3.4.7" dj-database-url = "==0.5.0" django = "==3.2.15" -django-admin-508 = "==0.2.2" +django-admin-508 = "==1.0.1" django-admin-logs = "==1.0.2" django-configurations = "==2.2" django-cors-headers = "==3.12.0" django-extensions = "==3.1.3" django-filter = "==21.1" -django-more-admin-filters = "==1.8" django-model-utils = "==4.1.1" django-storages = "==1.12.3" django-unique-upload 
= "==0.2.1" @@ -60,6 +59,7 @@ cerberus = "==1.3.4" xlsxwriter = "==3.1.9" openpyxl = "==3.1.2" sendgrid = "==6.10.0" +sentry-sdk = "==2.11.0" [requires] python_version = "3.10.8" diff --git a/tdrs-backend/Pipfile.lock b/tdrs-backend/Pipfile.lock index 0ca355085..9a2398138 100644 --- a/tdrs-backend/Pipfile.lock +++ b/tdrs-backend/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "2dd2adca467bcb7a6281923765737b5b0b52101a30efc80401e5552109874674" + "sha256": "902bde5efee2d67d08d56183d72faea8d701ed4c753c2ec2f64cb51f08f1846e" }, "pipfile-spec": 6, "requires": { @@ -83,69 +83,84 @@ }, "certifi": { "hashes": [ - "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516", - "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56" + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" ], "markers": "python_version >= '3.6'", - "version": "==2024.6.2" + "version": "==2024.8.30" }, "cffi": { "hashes": [ - "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", - "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", - "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", - "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", - "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", - "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", - "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", - "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", - "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", - "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", - "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", - "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", - 
"sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", - "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", - "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", - "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", - "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", - "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", - "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", - "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", - "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", - "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", - "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", - "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", - "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", - "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", - "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", - "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", - "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", - "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", - "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", - "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", - "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", - "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", - "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", - "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", - "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", - "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", 
- "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", - "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", - "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", - "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", - "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", - "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", - "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", - "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", - "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", - "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", - "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", - "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", - "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", - "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f", + "sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab", + "sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499", + "sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058", + "sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693", + "sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb", + "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377", + "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885", + "sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2", + "sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401", + "sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4", + 
"sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b", + "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59", + "sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f", + "sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c", + "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555", + "sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa", + "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424", + "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb", + "sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2", + "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8", + "sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e", + "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9", + "sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82", + "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828", + "sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759", + "sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc", + "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118", + "sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf", + "sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932", + "sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a", + "sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29", + "sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206", + "sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2", + "sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c", + "sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c", + "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0", 
+ "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a", + "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195", + "sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6", + "sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9", + "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc", + "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb", + "sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0", + "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7", + "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb", + "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a", + "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492", + "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720", + "sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42", + "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7", + "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d", + "sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d", + "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb", + "sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4", + "sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2", + "sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b", + "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8", + "sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e", + "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204", + "sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3", + "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150", + 
"sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4", + "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76", + "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e", + "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb", + "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91" ], "markers": "python_version >= '3.8'", - "version": "==1.16.0" + "version": "==1.17.0" }, "charset-normalizer": { "hashes": [ @@ -256,11 +271,11 @@ }, "django-admin-508": { "hashes": [ - "sha256:6488ce76cbccecb1667ee21d49e87a259d43f7a619b18e7035c9e6bdf1c79bb3", - "sha256:fd7ed03e27efaa5b33aa47c4d82ae540a7c42957504061854fc76c046bca8607" + "sha256:419d017eab16c264b771c8c7ef1815c1c181cf4a1603b7e45cf78a3bbecb1d4a", + "sha256:fbc7bb8bc37f4c2089efceda9818a97898881ab80273919248f85cd3d6f01215" ], "index": "pypi", - "version": "==0.2.2" + "version": "==1.0.1" }, "django-admin-logs": { "hashes": [ @@ -366,14 +381,6 @@ "index": "pypi", "version": "==4.1.1" }, - "django-more-admin-filters": { - "hashes": [ - "sha256:2d5dd9e8b55d85638d5e260dfb694b1903288b61c37e655b9443b70a5f36833f", - "sha256:fc4d3a3bf0367763a887dceca4b469e467ad062a9e8da1c29b6d6137c5b0e3cd" - ], - "index": "pypi", - "version": "==1.8" - }, "django-nine": { "hashes": [ "sha256:304e0f83cea5a35359375fc919d00f9917b655c1d388244cbfc7363f59489177", @@ -451,19 +458,19 @@ }, "exceptiongroup": { "hashes": [ - "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad", - "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16" + "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", + "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc" ], "markers": "python_version < '3.11'", - "version": "==1.2.1" + "version": "==1.2.2" }, "executing": { "hashes": [ - "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147", - 
"sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc" + "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", + "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" ], - "markers": "python_version >= '3.5'", - "version": "==2.0.1" + "markers": "python_version >= '3.8'", + "version": "==2.1.0" }, "flower": { "hashes": [ @@ -484,19 +491,19 @@ }, "humanize": { "hashes": [ - "sha256:582a265c931c683a7e9b8ed9559089dea7edcf6cc95be39a3cbc2c5d5ac2bcfa", - "sha256:ce284a76d5b1377fd8836733b983bfb0b76f1aa1c090de2566fcf008d7f6ab16" + "sha256:06b6eb0293e4b85e8d385397c5868926820db32b9b654b932f57fa41c23c9978", + "sha256:39e7ccb96923e732b5c2e27aeaa3b10a8dfeeba3eb965ba7b74a3eb0e30040a6" ], "markers": "python_version >= '3.8'", - "version": "==4.9.0" + "version": "==4.10.0" }, "idna": { "hashes": [ - "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", - "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac", + "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603" ], "markers": "python_version >= '3'", - "version": "==3.7" + "version": "==3.8" }, "inflection": { "hashes": [ @@ -516,11 +523,11 @@ }, "ipython": { "hashes": [ - "sha256:53eee7ad44df903a06655871cbab66d156a051fd86f3ec6750470ac9604ac1ab", - "sha256:c6ed726a140b6e725b911528f80439c534fac915246af3efc39440a6b0f9d716" + "sha256:0b99a2dc9f15fd68692e898e5568725c6d49c527d36a9fb5960ffbdeaa82ff7e", + "sha256:f68b3cb8bde357a5d7adc9598d57e22a45dfbea19eb6b98286fa3b288c9cd55c" ], "markers": "python_version >= '3.7'", - "version": "==8.25.0" + "version": "==8.27.0" }, "itypes": { "hashes": [ @@ -563,11 +570,11 @@ }, "kombu": { "hashes": [ - "sha256:011c4cd9a355c14a1de8d35d257314a1d2456d52b7140388561acac3cf1a97bf", - "sha256:5634c511926309c7f9789f1433e9ed402616b56836ef9878f01bd59267b4c7a9" + 
"sha256:ad200a8dbdaaa2bbc5f26d2ee7d707d9a1fded353a0f4bd751ce8c7d9f449c60", + "sha256:c8dd99820467610b4febbc7a9e8a0d3d7da2d35116b67184418b51cc520ea6b6" ], "markers": "python_version >= '3.8'", - "version": "==5.3.7" + "version": "==5.4.0" }, "markdown": { "hashes": [ @@ -687,78 +694,89 @@ }, "pillow": { "hashes": [ - "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c", - "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2", - "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb", - "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d", - "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa", - "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3", - "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1", - "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a", - "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd", - "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8", - "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999", - "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599", - "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936", - "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375", - "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d", - "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b", - "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60", - "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572", - "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3", - "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced", - "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f", - 
"sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b", - "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19", - "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f", - "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d", - "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383", - "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795", - "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355", - "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57", - "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09", - "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b", - "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462", - "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf", - "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f", - "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a", - "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad", - "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9", - "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d", - "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45", - "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994", - "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d", - "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338", - "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463", - "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451", - "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591", - "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c", - "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd", 
- "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32", - "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9", - "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf", - "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5", - "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828", - "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3", - "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5", - "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2", - "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b", - "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2", - "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475", - "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3", - "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb", - "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef", - "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015", - "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002", - "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170", - "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84", - "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57", - "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f", - "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27", - "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a" + "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885", + "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea", + "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df", + 
"sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5", + "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c", + "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d", + "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd", + "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06", + "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908", + "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a", + "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be", + "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0", + "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b", + "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80", + "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a", + "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e", + "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9", + "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696", + "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b", + "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309", + "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e", + "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab", + "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d", + "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060", + "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d", + "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d", + "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4", + "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3", + "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6", 
+ "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb", + "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94", + "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b", + "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496", + "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0", + "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319", + "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b", + "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856", + "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef", + "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680", + "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b", + "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42", + "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e", + "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597", + "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a", + "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8", + "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3", + "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736", + "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da", + "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126", + "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd", + "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5", + "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b", + "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026", + "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b", + 
"sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc", + "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46", + "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2", + "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c", + "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe", + "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984", + "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a", + "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70", + "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca", + "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b", + "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91", + "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3", + "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84", + "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1", + "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5", + "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be", + "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f", + "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc", + "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9", + "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e", + "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141", + "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef", + "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22", + "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27", + "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e", + "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1" 
], "markers": "python_version >= '3.8'", - "version": "==10.3.0" + "version": "==10.4.0" }, "prometheus-client": { "hashes": [ @@ -805,10 +823,10 @@ }, "pure-eval": { "hashes": [ - "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350", - "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3" + "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", + "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" ], - "version": "==0.2.2" + "version": "==0.2.3" }, "pycparser": { "hashes": [ @@ -837,9 +855,10 @@ }, "python-crontab": { "hashes": [ - "sha256:f4ea1605d24533b67fa7a634ef26cb59a5f2e7954f6e677d2d7a2229959a2fc8" + "sha256:40067d1dd39ade3460b2ad8557c7651514cd3851deffff61c5c60e1227c5c36b", + "sha256:82cb9b6a312d41ff66fd3caf3eed7115c28c195bfb50711bc2b4b9592feb9fe5" ], - "version": "==3.1.0" + "version": "==3.2.0" }, "python-dateutil": { "hashes": [ @@ -980,13 +999,22 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==6.10.0" }, + "sentry-sdk": { + "hashes": [ + "sha256:4ca16e9f5c7c6bc2fb2d5c956219f4926b148e511fffdbbde711dc94f1e0468f", + "sha256:d964710e2dbe015d9dc4ff0ad16225d68c3b36936b742a6fe0504565b760a3b7" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==2.11.0" + }, "setuptools": { "hashes": [ - "sha256:01a1e793faa5bd89abc851fa15d0a0db26f160890c7102cd8dce643e886b47f5", - "sha256:d9b8b771455a97c8a9f3ab3448ebe0b29b5e105f1228bba41028be116985a267" + "sha256:bea195a800f510ba3a2bc65645c88b7e016fe36709fefc58a880c4ae8a0138d7", + "sha256:cee604bd76cc092355a4e43ec17aee5369095974f41f088676724dc6bc2c9ef8" ], "markers": "python_version >= '3.8'", - "version": "==70.1.0" + "version": "==74.1.0" }, "six": { "hashes": [ @@ -998,11 +1026,11 @@ }, "sqlparse": { "hashes": [ - "sha256:714d0a4932c059d16189f58ef5411ec2287a4360f17cdd0edd2d09d4c5087c93", - 
"sha256:c204494cd97479d0e39f28c93d46c0b2d5959c7b9ab904762ea6c7af211c8663" + "sha256:773dcbf9a5ab44a090f3441e2180efe2560220203dc2f8c0b0fa141e18b505e4", + "sha256:bb6b4df465655ef332548e24f08e205afc81b9ab86cb1c45657a7ff173a3a00e" ], "markers": "python_version >= '3.8'", - "version": "==0.5.0" + "version": "==0.5.1" }, "stack-data": { "hashes": [ @@ -1068,11 +1096,11 @@ }, "urllib3": { "hashes": [ - "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3", - "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429" + "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", + "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==1.26.19" + "version": "==1.26.20" }, "vine": { "hashes": [ @@ -1240,61 +1268,81 @@ "toml" ], "hashes": [ - "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523", - "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f", - "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d", - "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb", - "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0", - "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c", - "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98", - "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83", - "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8", - "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7", - "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac", - "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84", - "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb", - "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3", 
- "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884", - "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614", - "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd", - "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807", - "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd", - "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8", - "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc", - "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db", - "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0", - "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08", - "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232", - "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d", - "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a", - "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1", - "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286", - "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303", - "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341", - "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84", - "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45", - "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc", - "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec", - "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd", - "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155", - "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52", - "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d", - 
"sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485", - "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31", - "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d", - "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d", - "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d", - "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85", - "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce", - "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb", - "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974", - "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24", - "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56", - "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9", - "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35" + "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca", + "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d", + "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6", + "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989", + "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c", + "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b", + "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223", + "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f", + "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56", + "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3", + "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8", + "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb", + "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388", 
+ "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0", + "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a", + "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8", + "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f", + "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a", + "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962", + "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8", + "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391", + "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc", + "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2", + "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155", + "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb", + "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0", + "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c", + "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a", + "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004", + "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060", + "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232", + "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93", + "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129", + "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163", + "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de", + "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6", + "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23", + "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569", + 
"sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d", + "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778", + "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d", + "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36", + "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a", + "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6", + "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34", + "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704", + "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106", + "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9", + "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862", + "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b", + "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255", + "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16", + "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3", + "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133", + "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb", + "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657", + "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d", + "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca", + "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36", + "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c", + "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e", + "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff", + "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7", + "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5", 
+ "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02", + "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c", + "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df", + "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3", + "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a", + "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959", + "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234", + "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc" ], "markers": "python_version >= '3.8'", - "version": "==7.5.3" + "version": "==7.6.1" }, "docutils": { "hashes": [ @@ -1306,11 +1354,11 @@ }, "exceptiongroup": { "hashes": [ - "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad", - "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16" + "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", + "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc" ], "markers": "python_version < '3.11'", - "version": "==1.2.1" + "version": "==1.2.2" }, "factory-boy": { "hashes": [ @@ -1323,11 +1371,11 @@ }, "faker": { "hashes": [ - "sha256:4c40b34a9c569018d4f9d6366d71a4da8a883d5ddf2b23197be5370f29b7e1b6", - "sha256:bdec5f2fb057d244ebef6e0ed318fea4dcbdf32c3a1a010766fc45f5d68fc68d" + "sha256:b17d69312ef6485a720e21bffa997668c88876a5298b278e903ba706243c9c6b", + "sha256:bc460a0e6020966410d0b276043879abca0fac51890f3324bc254bb0a383ee3a" ], "markers": "python_version >= '3.8'", - "version": "==25.8.0" + "version": "==28.1.0" }, "flake8": { "hashes": [ @@ -1621,60 +1669,62 @@ }, "pyyaml": { "hashes": [ - "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", - "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", - "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", - 
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", - "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", - "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", - "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", - "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", - "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", - "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", - "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", - "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", - "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", - "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", - "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", - "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", - "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", - "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", - "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", - "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", - "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", - "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", - "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", - "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", - "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", - "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", - "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", - "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", - "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", 
- "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", - "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", - "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", - "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", - "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", - "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", - "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", - "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", - "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", - "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", - "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", - "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", - "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", - "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", - "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", - "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", - "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", - "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", - "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", - "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", - "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", - "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + 
"sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", 
+ "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", + "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", + "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" ], - "markers": "python_version >= '3.6'", - "version": "==6.0.1" + "markers": "python_version >= '3.8'", + "version": "==6.0.2" }, "pyyaml-env-tag": { 
"hashes": [ @@ -1733,49 +1783,47 @@ }, "urllib3": { "hashes": [ - "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3", - "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429" + "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", + "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==1.26.19" + "version": "==1.26.20" }, "watchdog": { "hashes": [ - "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7", - "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767", - "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175", - "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459", - "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5", - "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429", - "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6", - "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d", - "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7", - "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28", - "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235", - "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57", - "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a", - "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5", - "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709", - "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee", - "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84", - "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd", - 
"sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba", - "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db", - "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682", - "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35", - "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d", - "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645", - "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253", - "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193", - "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b", - "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44", - "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b", - "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625", - "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e", - "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5" - ], - "markers": "python_version >= '3.8'", - "version": "==4.0.1" + "sha256:1e8ca9b7f5f03d2f0556a43db1e9adf1e5af6adf52e0890f781324514b67a612", + "sha256:20a28c8b0b3edf4ea2b27fb3527fc0a348e983f22a4317d316bb561524391932", + "sha256:2b8cd627b76194e725ed6f48d9524b1ad93a51a0dc3bd0225c56023716245091", + "sha256:39e828c4270452b966bc9d814911a3c7e24c62d726d2a3245f5841664ff56b5e", + "sha256:39f0de161a822402f0f00c68b82349a4d71c9814e749148ca2b083a25606dbf9", + "sha256:4eaebff2f938f5325788cef26521891b2d8ecc8e7852aa123a9b458815f93875", + "sha256:5541a8765c4090decb4dba55d3dceb57724748a717ceaba8dc4f213edb0026e0", + "sha256:59ec6111f3750772badae3403ef17263489ed6f27ac01ec50c0244b2afa258fb", + "sha256:664917cd513538728875a42d5654584b533da88cf06680452c98e73b45466968", + "sha256:6bb68d9adb9c45f0dc1c2b12f4fb6eab0463a8f9741e371e4ede6769064e0785", + "sha256:6fbb4dd5ace074a2969825fde10034b35b31efcb6973defb22eb945b1d3acc37", + 
"sha256:70e30116849f4ec52240eb1fad83d27e525eae179bfe1c09b3bf120163d731b6", + "sha256:72dbdffe4aa0c36c59f4a5190bceeb7fdfdf849ab98a562b3a783a64cc6dacdd", + "sha256:753c6a4c1eea9d3b96cd58159b49103e66cb288216a414ab9ad234ccc7642ec2", + "sha256:763c6f82bb65504b47d4aea268462b2fb662676676356e04787f332a11f03eb0", + "sha256:8ba1472b5fa7c644e49641f70d7ccc567f70b54d776defa5d6f755dc2edc3fbb", + "sha256:9b1b32f89f95162f09aea6e15d9384f6e0490152f10d7ed241f8a85cddc50658", + "sha256:a03a6ccb846ead406a25a0b702d0a6b88fdfa77becaf907cfcfce7737ebbda1f", + "sha256:a1cd7c919940b15f253db8279a579fb81e4e4e434b39b11a1cb7f54fe3fa46a6", + "sha256:a6b8c6c82ada78479a0df568d27d69aa07105aba9301ac66d1ae162645f4ba34", + "sha256:a791dfc050ed24b82f7f100ae794192594fe863a7e9bdafcdfa5c6e405a981e5", + "sha256:b21e6601efe8453514c2fc21aca57fb5413c3d8b157bfe520b05b57b1788a167", + "sha256:b2d56425dfa0c1e6f8a510f21d3d54ef7fe50bbc29638943c2cb1394b7b49156", + "sha256:c4ae0b3e95455fa9d959aa3b253c87845ad454ef188a4bf5a69cab287c131216", + "sha256:c92812a358eabebe92b12b9290d16dc95c8003654658f6b2676c9a2103a73ceb", + "sha256:c93aa24899cb4e8a51492c7ccc420bea45ced502fe9ef2e83f9ab1107e5a13b5", + "sha256:e321f1561adea30e447130882efe451af519646178d04189d6ba91a8cd7d88a5", + "sha256:f0180e84e6493ef7c82e051334e8c9b00ffd89fa9de5e0613d3c267f6ccf2d38", + "sha256:f3006361dba2005552cc8aa49c44d16a10e0a1939bb3286e888a14f722122808", + "sha256:f66df2c152edf5a2fe472bb2f8a5d562165bcf6cf9686cee5d75e524c21ca895" + ], + "markers": "python_version >= '3.9'", + "version": "==5.0.1" } } } diff --git a/tdrs-backend/docs/session-management.md b/tdrs-backend/docs/session-management.md index 78ff6dd05..e4f0c1831 100644 --- a/tdrs-backend/docs/session-management.md +++ b/tdrs-backend/docs/session-management.md @@ -5,9 +5,17 @@ The requirement for this project is that users will be logged out of the system ### Backend The backend will be the ultimate arbiter of session management. 
When the user logs in they will receive an HttpOnly cookie that is set to expire in 30 minutes. After that, with every interaction between the FE and BE, the BE will refresh the cookie, so it will extend the timeout time to another 30 minutes. -This is managed in `tdrs-backend/tdpservice/settings/common.py` with the following setting: +When the user logs in, they will receive an HttpOnly cookie with no `Expires=` setting. This indicates a [session cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies#removal_defining_the_lifetime_of_a_cookie) which will automatically expire upon browser close. This is controlled with the django setting: + +```python +SESSION_EXPIRE_AT_BROWSER_CLOSE=True ``` -SESSION_TIMEOUT = 30 + +The cookie itself contains a `sessionid` reference to a Django-managed session. The session expiration is set to the same expiration of the login.gov-provided jwt, **15 minutes**. + +This is managed in `tdrs-backend/tdpservice/settings/common.py` with the following setting: +```python +SESSION_COOKIE_AGE = 15 * 60 # 15 minutes ``` ### Frontend diff --git a/tdrs-backend/gunicorn_start.sh b/tdrs-backend/gunicorn_start.sh index 9224f9de3..40a77af88 100755 --- a/tdrs-backend/gunicorn_start.sh +++ b/tdrs-backend/gunicorn_start.sh @@ -20,7 +20,7 @@ else fi # Celery worker config can be found here: https://docs.celeryq.dev/en/stable/userguide/workers.html#:~:text=The-,hostname,-argument%20can%20expand -celery -A tdpservice.settings worker --loglevel=WARNING --concurrency=1 -n worker1@%h & +celery -A tdpservice.settings worker --loglevel=INFO --concurrency=1 --max-tasks-per-child=1 -n worker1@%h & sleep 5 # TODO: Uncomment the following line to add flower service when memory limitation is resolved diff --git a/tdrs-backend/tdpservice/data_files/admin/__init__.py b/tdrs-backend/tdpservice/data_files/admin/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tdrs-backend/tdpservice/data_files/admin.py 
b/tdrs-backend/tdpservice/data_files/admin/admin.py similarity index 75% rename from tdrs-backend/tdpservice/data_files/admin.py rename to tdrs-backend/tdpservice/data_files/admin/admin.py index 1a049dad3..7e5689460 100644 --- a/tdrs-backend/tdpservice/data_files/admin.py +++ b/tdrs-backend/tdpservice/data_files/admin/admin.py @@ -1,34 +1,26 @@ """Admin class for DataFile objects.""" from django.contrib import admin - -from ..core.utils import ReadOnlyAdminMixin -from .models import DataFile, LegacyFileTransfer +from tdpservice.core.utils import ReadOnlyAdminMixin +from tdpservice.data_files.models import DataFile, LegacyFileTransfer from tdpservice.parsers.models import DataFileSummary, ParserError +from tdpservice.data_files.admin.filters import DataFileSummaryPrgTypeFilter, LatestReparseEvent from django.conf import settings from django.utils.html import format_html DOMAIN = settings.FRONTEND_BASE_URL -class DataFileSummaryPrgTypeFilter(admin.SimpleListFilter): - """Admin class filter for Program Type on datafile model.""" - title = 'Program Type' - parameter_name = 'program_type' +class DataFileInline(admin.TabularInline): + """Inline model for many to many relationship.""" + + model = DataFile.reparse_meta_models.through + can_delete = False + ordering = ["-pk"] - def lookups(self, request, model_admin): - """Return a list of tuples.""" - return [ - ('TAN', 'TAN'), - ('SSP', 'SSP'), - ] + def has_change_permission(self, request, obj=None): + """Read only permissions.""" + return False - def queryset(self, request, queryset): - """Return a queryset.""" - if self.value(): - query_set_ids = [df.id for df in queryset if df.prog_type == self.value()] - return queryset.filter(id__in=query_set_ids) - else: - return queryset @admin.register(DataFile) class DataFileAdmin(ReadOnlyAdminMixin, admin.ModelAdmin): @@ -61,6 +53,8 @@ def data_file_summary(self, obj): field=f'{df.id}' + ":" + df.get_status(), url=f"{DOMAIN}/admin/parsers/datafilesummary/{df.id}/change/") + 
inlines = [DataFileInline] + list_display = [ 'id', 'stt', @@ -80,7 +74,8 @@ def data_file_summary(self, obj): 'year', 'version', 'summary__status', - DataFileSummaryPrgTypeFilter + DataFileSummaryPrgTypeFilter, + LatestReparseEvent ] @admin.register(LegacyFileTransfer) diff --git a/tdrs-backend/tdpservice/data_files/admin/filters.py b/tdrs-backend/tdpservice/data_files/admin/filters.py new file mode 100644 index 000000000..a0f44c270 --- /dev/null +++ b/tdrs-backend/tdpservice/data_files/admin/filters.py @@ -0,0 +1,59 @@ +"""Filter classes for DataFiles admin page.""" +from django.contrib import admin +from django.utils.translation import ugettext_lazy as _ +from tdpservice.search_indexes.models.reparse_meta import ReparseMeta + +class DataFileSummaryPrgTypeFilter(admin.SimpleListFilter): + """Admin class filter for Program Type on datafile model.""" + + title = 'Program Type' + parameter_name = 'program_type' + + def lookups(self, request, model_admin): + """Return a list of tuples.""" + return [ + ('TAN', 'TAN'), + ('SSP', 'SSP'), + ] + + def queryset(self, request, queryset): + """Return a queryset.""" + if self.value(): + query_set_ids = [df.id for df in queryset if df.prog_type == self.value()] + return queryset.filter(id__in=query_set_ids) + else: + return queryset + + +class LatestReparseEvent(admin.SimpleListFilter): + """Filter class to filter files based on the latest reparse event.""" + + title = _('Reparse Event') + + parameter_name = 'reparse_meta_model' + + def lookups(self, request, model_admin): + """Available options in dropdown.""" + return ( + (None, _('All')), + ('latest', _('Latest')), + ) + + def choices(self, cl): + """Update query string based on selection.""" + for lookup, title in self.lookup_choices: + yield { + 'selected': self.value() == lookup, + 'query_string': cl.get_query_string({ + self.parameter_name: lookup, + }, []), + 'display': title, + } + + def queryset(self, request, queryset): + """Sort queryset to show datafiles 
associated to the most recent reparse event.""" + if self.value() is not None and queryset.exists(): + latest_meta = ReparseMeta.get_latest() + if latest_meta is not None: + queryset = queryset.filter(reparse_meta_models=latest_meta) + return queryset diff --git a/tdrs-backend/tdpservice/data_files/migrations/0013_datafile_reparse_meta.py b/tdrs-backend/tdpservice/data_files/migrations/0013_datafile_reparse_meta.py new file mode 100644 index 000000000..2065d23e2 --- /dev/null +++ b/tdrs-backend/tdpservice/data_files/migrations/0013_datafile_reparse_meta.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.15 on 2024-08-05 15:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('search_indexes', '0030_reparse_meta_model'), + ('data_files', '0012_datafile_s3_versioning_id'), + ] + + operations = [ + migrations.AddField( + model_name='datafile', + name='reparse_meta_models', + field=models.ManyToManyField(help_text='Reparse events this file has been associated with.', related_name='datafiles', to='search_indexes.ReparseMeta'), + ), + ] diff --git a/tdrs-backend/tdpservice/data_files/models.py b/tdrs-backend/tdpservice/data_files/models.py index abfcce8ab..c00541419 100644 --- a/tdrs-backend/tdpservice/data_files/models.py +++ b/tdrs-backend/tdpservice/data_files/models.py @@ -152,6 +152,11 @@ class Meta: null=True ) + reparse_meta_models = models.ManyToManyField("search_indexes.ReparseMeta", + help_text="Reparse events this file has been associated with.", + related_name="datafiles" + ) + @property def prog_type(self): """Return the program type for a given section.""" diff --git a/tdrs-backend/tdpservice/data_files/test/test_admin.py b/tdrs-backend/tdpservice/data_files/test/test_admin.py index 02701fe82..c11b1bd6f 100644 --- a/tdrs-backend/tdpservice/data_files/test/test_admin.py +++ b/tdrs-backend/tdpservice/data_files/test/test_admin.py @@ -2,7 +2,7 @@ import pytest from django.contrib.admin.sites import 
AdminSite -from tdpservice.data_files.admin import DataFileAdmin +from tdpservice.data_files.admin.admin import DataFileAdmin from tdpservice.data_files.models import DataFile from tdpservice.data_files.test.factories import DataFileFactory from tdpservice.parsers.test.factories import DataFileSummaryFactory diff --git a/tdrs-backend/tdpservice/data_files/test/test_api.py b/tdrs-backend/tdpservice/data_files/test/test_api.py index 55dba626e..78685b075 100644 --- a/tdrs-backend/tdpservice/data_files/test/test_api.py +++ b/tdrs-backend/tdpservice/data_files/test/test_api.py @@ -101,8 +101,8 @@ def assert_error_report_tanf_file_content_matches_with_friendly_names(response): assert ws.cell(row=1, column=1).value == "Please refer to the most recent versions of the coding " \ + "instructions (linked below) when looking up items and allowable values during the data revision process" assert ws.cell(row=8, column=COL_ERROR_MESSAGE).value == ( - "if Cash Amount :873 validator1 passed then Item 21B " - "(Cash and Cash Equivalents: Number of Months) 0 is not larger than 0." 
+ "Since Item 21A (Cash Amount) is 873, then Item 21B " + "(Cash and Cash Equivalents: Number of Months) 0 must be greater than 0" ) @staticmethod @@ -115,7 +115,7 @@ def assert_error_report_ssp_file_content_matches_with_friendly_names(response): assert ws.cell(row=1, column=1).value == "Please refer to the most recent versions of the coding " \ + "instructions (linked below) when looking up items and allowable values during the data revision process" assert ws.cell(row=7, column=COL_ERROR_MESSAGE).value == ("M1 Item 11 (Receives Subsidized Housing): 3 is " - "not larger or equal to 1 and smaller or equal to 2.") + "not in range [1, 2].") @staticmethod def assert_error_report_file_content_matches_without_friendly_names(response): @@ -135,8 +135,8 @@ def assert_error_report_file_content_matches_without_friendly_names(response): assert ws.cell(row=1, column=1).value == "Please refer to the most recent versions of the coding " \ + "instructions (linked below) when looking up items and allowable values during the data revision process" assert ws.cell(row=8, column=COL_ERROR_MESSAGE).value == ( - "if CASH_AMOUNT :873 validator1 passed then Item 21B " - "(Cash and Cash Equivalents: Number of Months) 0 is not larger than 0." 
+ "Since Item 21A (Cash Amount) is 873, then Item 21B " + "(Cash and Cash Equivalents: Number of Months) 0 must be greater than 0" ) @staticmethod diff --git a/tdrs-backend/tdpservice/email/email.py b/tdrs-backend/tdpservice/email/email.py index 9d554fbeb..30029ee05 100644 --- a/tdrs-backend/tdpservice/email/email.py +++ b/tdrs-backend/tdpservice/email/email.py @@ -6,10 +6,9 @@ from django.conf import settings from django.template.loader import get_template from tdpservice.core.utils import log - import logging -logger = logging.getLogger() +logger = logging.getLogger(__name__) def prepare_recipients(recipient_email): @@ -78,6 +77,8 @@ def filter_valid_emails(emails, logger_context=None): logger_context=logger_context ) if len(valid_emails) == 0: - raise ValidationError("No valid emails provided.") + log("No valid emails provided.", + logger_context, + "warn") return valid_emails diff --git a/tdrs-backend/tdpservice/email/helpers/data_file.py b/tdrs-backend/tdpservice/email/helpers/data_file.py index 20cfbc7af..1ed966a87 100644 --- a/tdrs-backend/tdpservice/email/helpers/data_file.py +++ b/tdrs-backend/tdpservice/email/helpers/data_file.py @@ -43,7 +43,7 @@ def send_data_submitted_email( "url": settings.FRONTEND_BASE_URL } - log(f'Data file submitted; emailing Data Analysts {recipients}', logger_context=logger_context) + log(f'Data file submitted; emailing Data Analysts {list(recipients)}', logger_context=logger_context) match datafile_summary.status: case DataFileSummary.Status.PENDING: diff --git a/tdrs-backend/tdpservice/email/test/test_email.py b/tdrs-backend/tdpservice/email/test/test_email.py index 39beada4f..b4929574a 100644 --- a/tdrs-backend/tdpservice/email/test/test_email.py +++ b/tdrs-backend/tdpservice/email/test/test_email.py @@ -2,7 +2,6 @@ from django.core import mail from django.test import TestCase -from django.core.exceptions import ValidationError from tdpservice.email.email import ( automated_email, @@ -50,8 +49,7 @@ def 
test_automated_email_fails_with_invalid_email(self): mail.outbox.clear() - with self.assertRaises(ValidationError): - automated_email(email_path, recipient_email, subject, email_context, text_message) + automated_email(email_path, recipient_email, subject, email_context, text_message) self.assertEqual(len(mail.outbox), 0) def test_filter_valid_emails(self): @@ -64,5 +62,4 @@ def test_filter_valid_emails_fails(self): """Test validate emails raised ValidationError .""" emails = ["foo", "bar"] - with self.assertRaises(ValidationError): - filter_valid_emails(emails) + assert len(filter_valid_emails(emails)) == 0 diff --git a/tdrs-backend/tdpservice/parsers/case_consistency_validator.py b/tdrs-backend/tdpservice/parsers/case_consistency_validator.py index ce0d6a13c..c388101da 100644 --- a/tdrs-backend/tdpservice/parsers/case_consistency_validator.py +++ b/tdrs-backend/tdpservice/parsers/case_consistency_validator.py @@ -6,6 +6,8 @@ from .util import get_years_apart from tdpservice.stts.models import STT from tdpservice.parsers.schema_defs.utils import get_program_model +from tdpservice.parsers.validators.util import ValidationErrorArgs +from tdpservice.parsers.validators.category3 import format_error_context import logging logger = logging.getLogger(__name__) @@ -24,25 +26,40 @@ def __init__(self, header, program_type, stt_type, generate_error): self.current_case_hash = None self.case_has_errors = False self.section = header["type"] - self.case_is_section_one_or_two = self.section in {'A', 'C'} + self.case_is_section_one_or_two = self.section in {"A", "C"} self.program_type = program_type + self.is_ssp = self.program_type == "SSP" self.has_validated = False self.generate_error = generate_error self.generated_errors = list() self.total_cases_cached = 0 self.total_cases_validated = 0 self.stt_type = stt_type + self.s1s = None + self.s2s = None def __get_model(self, model_str): - """Return a model for the current program type/section given the model's string name.""" + 
"""Return a model for the current program type/section given the model"s string name.""" manager = get_program_model(self.program_type, self.section, model_str) return manager.schemas[0].document.Django.model - def __generate_and_add_error(self, schema, record, field, msg): + def __get_error_context(self, field_name, schema): + if schema is None: + return field_name + field = schema.get_field_by_name(field_name) + error_args = ValidationErrorArgs(value=None, + row_schema=schema, + friendly_name=field.friendly_name, + item_num=field.item, + ) + return format_error_context(error_args) + + def __generate_and_add_error(self, schema, record, field, line_num, msg): """Generate a ParserError and add it to the `generated_errors` list.""" err = self.generate_error( error_category=ParserErrorCategoryChoices.CASE_CONSISTENCY, schema=schema, + line_number=line_num, record=record, field=field, error_message=msg, @@ -65,18 +82,18 @@ def num_generated_errors(self): """Return current number of generated errors for the current case.""" return len(self.generated_errors) - def add_record_to_structs(self, record_schema_pair): + def add_record_to_structs(self, record_triplet): """Add record_schema_pair to structs.""" - record = record_schema_pair[0] - self.sorted_cases.setdefault(type(record), []).append(record_schema_pair) - self.cases.append(record_schema_pair) + record = record_triplet[0] + self.sorted_cases.setdefault(type(record), []).append(record_triplet) + self.cases.append(record_triplet) - def clear_structs(self, seed_record_schema_pair=None): + def clear_structs(self, seed_record_triplet=None): """Reset and optionally seed the structs.""" self.sorted_cases = dict() self.cases = list() - if seed_record_schema_pair: - self.add_record_to_structs(seed_record_schema_pair) + if seed_record_triplet: + self.add_record_to_structs(seed_record_triplet) def update_removed(self, case_hash, should_remove, was_removed): """Notify duplicate manager's CaseDuplicateDetectors whether they need 
to mark their records for DB removal.""" @@ -110,13 +127,13 @@ def add_record(self, record, schema, line, line_number, case_has_errors): if self.case_is_section_one_or_two: if latest_case_hash != self.current_case_hash and self.current_case_hash is not None: num_errors += self.validate() - self.clear_structs((record, schema)) + self.clear_structs((record, schema, line_number)) self.case_has_errors = case_has_errors self.has_validated = False case_hash_to_remove = self.current_case_hash else: self.case_has_errors = self.case_has_errors if self.case_has_errors else case_has_errors - self.add_record_to_structs((record, schema)) + self.add_record_to_structs((record, schema, line_number)) self.has_validated = False self.current_case = record.CASE_NUMBER @@ -138,9 +155,12 @@ def validate(self): self.total_cases_cached += 1 num_errors = self.__validate() return num_errors - except Exception as e: - logger.error(f"Uncaught exception during category four validation: {e}") + except Exception: + logger.exception("Uncaught exception during category four validation.") return num_errors + finally: + self.s1s = None + self.s2s = None def __validate(self): """Private validate, lint complexity.""" @@ -168,31 +188,88 @@ def __validate_section1(self, num_errors): def __validate_section2(self, num_errors): """Perform TANF Section 2 category four validation on all cached records.""" num_errors += self.__validate_s2_records_are_related() - num_errors += self.__validate_t5_aabd_and_ssi() + num_errors += self.__validate_t5_atd_and_ssi() return num_errors - def __validate_family_affiliation(self, num_errors, t1s, t2s, t3s, error_msg): + def __has_family_affil(self, records, passed): + """Check if a set of records (T2s or T3s) has correct family affiliation.""" + context = "" + is_records = len(records) > 0 + if is_records and not passed: + context = self.__get_error_context("FAMILY_AFFILIATION", records[0][1]) + "==1" + for record, schema, line_num in records: + family_affiliation = 
getattr(record, "FAMILY_AFFILIATION") + if family_affiliation == 1: + return context, True, False + return context, passed, is_records + + def __validate_family_affiliation(self, + num_errors, + t1_model_name, t1s, + t2_model_name, t2s, + t3_model_name, t3s): """Validate at least one record in t2s+t3s has FAMILY_AFFILIATION == 1.""" num_errors = 0 passed = False - for record, schema in t2s + t3s: - family_affiliation = getattr(record, 'FAMILY_AFFILIATION') - if family_affiliation == 1: - passed = True - break + error_msg = ( + f"Every {t1_model_name} record should have at least one corresponding " + f"{t2_model_name} or {t3_model_name} record with the same " + ) + + t2_context, passed, is_t2 = self.__has_family_affil(t2s, passed) + t3_context, passed, is_t3 = self.__has_family_affil(t3s, passed) + + final_context = "" + if is_t2 and is_t3: + final_context += t2_context + " or " + t3_context + "." + elif is_t2: + final_context += t2_context + "." + else: + final_context += t3_context + "." 
if not passed: - for record, schema in t1s: + for record, schema, line_num in t1s: + rpt_context = f"{self.__get_error_context('RPT_MONTH_YEAR', schema)} and " + case_context = f"{self.__get_error_context('CASE_NUMBER', schema)}, where " + error_msg += rpt_context + case_context + final_context self.__generate_and_add_error( schema, record, - field='FAMILY_AFFILIATION', + field="FAMILY_AFFILIATION", + line_num=line_num, msg=error_msg ) num_errors += 1 return num_errors + def __get_s1_triplets_and_names(self): + if self.s1s is None: + t1_model_name = "M1" if self.is_ssp else "T1" + t1_model = self.__get_model(t1_model_name) + t2_model_name = "M2" if self.is_ssp else "T2" + t2_model = self.__get_model(t2_model_name) + t3_model_name = "M3" if self.is_ssp else "T3" + t3_model = self.__get_model(t3_model_name) + + t1s = self.sorted_cases.get(t1_model, []) + t2s = self.sorted_cases.get(t2_model, []) + t3s = self.sorted_cases.get(t3_model, []) + self.s1s = (t1s, t1_model_name, t2s, t2_model_name, t3s, t3_model_name) + return self.s1s + + def __get_s2_triplets_and_names(self): + if self.s2s is None: + t4_model_name = "M4" if self.is_ssp else "T4" + t4_model = self.__get_model(t4_model_name) + t5_model_name = "M5" if self.is_ssp else "T5" + t5_model = self.__get_model(t5_model_name) + + t4s = self.sorted_cases.get(t4_model, []) + t5s = self.sorted_cases.get(t5_model, []) + self.s2s = (t4s, t4_model_name, t5s, t5_model_name) + return self.s2s + def __validate_s1_records_are_related(self): """ Validate section 1 records are related. @@ -201,30 +278,21 @@ def __validate_s1_records_are_related(self): record with the same RPT_MONTH_YEAR and CASE_NUMBER. 
""" num_errors = 0 - is_ssp = self.program_type == 'SSP' - - t1_model_name = 'M1' if is_ssp else 'T1' - t1_model = self.__get_model(t1_model_name) - t2_model_name = 'M2' if is_ssp else 'T2' - t2_model = self.__get_model(t2_model_name) - t3_model_name = 'M3' if is_ssp else 'T3' - t3_model = self.__get_model(t3_model_name) - - t1s = self.sorted_cases.get(t1_model, []) - t2s = self.sorted_cases.get(t2_model, []) - t3s = self.sorted_cases.get(t3_model, []) + t1s, t1_model_name, t2s, t2_model_name, t3s, t3_model_name = self.__get_s1_triplets_and_names() if len(t1s) > 0: if len(t2s) == 0 and len(t3s) == 0: - for record, schema in t1s: + for record, schema, line_num in t1s: self.__generate_and_add_error( schema, record, - field='RPT_MONTH_YEAR', + field="RPT_MONTH_YEAR", + line_num=line_num, msg=( - f'Every {t1_model_name} record should have at least one ' - f'corresponding {t2_model_name} or {t3_model_name} record ' - f'with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t1_model_name} record should have at least one " + f"corresponding {t2_model_name} or {t3_model_name} record " + f"with the same {self.__get_error_context('RPT_MONTH_YEAR', schema)} and " + f"{self.__get_error_context('CASE_NUMBER', schema)}." 
) ) num_errors += 1 @@ -232,35 +300,38 @@ def __validate_s1_records_are_related(self): else: # loop through all t2s and t3s # to find record where FAMILY_AFFILIATION == 1 - num_errors += self.__validate_family_affiliation(num_errors, t1s, t2s, t3s, ( - f'Every {t1_model_name} record should have at least one corresponding ' - f'{t2_model_name} or {t3_model_name} record with the same RPT_MONTH_YEAR and ' - f'CASE_NUMBER, where FAMILY_AFFILIATION==1' - )) + num_errors += self.__validate_family_affiliation(num_errors, + t1_model_name, t1s, + t2_model_name, t2s, + t3_model_name, t3s) # the successful route # pass else: - for record, schema in t2s: + for record, schema, line_num in t2s: self.__generate_and_add_error( schema, record, - field='RPT_MONTH_YEAR', + field="RPT_MONTH_YEAR", + line_num=line_num, msg=( - f'Every {t2_model_name} record should have at least one corresponding ' - f'{t1_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t2_model_name} record should have at least one corresponding " + f"{t1_model_name} record with the same {self.__get_error_context('RPT_MONTH_YEAR', schema)} " + f"and {self.__get_error_context('CASE_NUMBER', schema)}." ) ) num_errors += 1 - for record, schema in t3s: + for record, schema, line_num in t3s: self.__generate_and_add_error( schema, record, - field='RPT_MONTH_YEAR', + field="RPT_MONTH_YEAR", + line_num=line_num, msg=( - f'Every {t3_model_name} record should have at least one corresponding ' - f'{t1_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t3_model_name} record should have at least one corresponding " + f"{t1_model_name} record with the same {self.__get_error_context('RPT_MONTH_YEAR', schema)} " + f"and {self.__get_error_context('CASE_NUMBER', schema)}." ) ) num_errors += 1 @@ -275,11 +346,11 @@ def __validate_case_closure_employment(self, t4, t5s, error_msg): the case must have employment status = 1:Yes in the same month. 
""" num_errors = 0 - t4_record, t4_schema = t4 + t4_record, t4_schema, line_num = t4 passed = False - for record, schema in t5s: - employment_status = getattr(record, 'EMPLOYMENT_STATUS') + for record, schema, line_num in t5s: + employment_status = getattr(record, "EMPLOYMENT_STATUS") if employment_status == 1: passed = True @@ -289,7 +360,8 @@ def __validate_case_closure_employment(self, t4, t5s, error_msg): self.__generate_and_add_error( t4_schema, t4_record, - 'EMPLOYMENT_STATUS', + "EMPLOYMENT_STATUS", + line_num, error_msg ) num_errors += 1 @@ -304,14 +376,14 @@ def __validate_case_closure_ftl(self, t4, t5s, error_msg): or spouse of HoH on case must have FTL months >=60. """ num_errors = 0 - t4_record, t4_schema = t4 + t4_record, t4_schema, line_num = t4 passed = False - for record, schema in t5s: - relationship_hoh = getattr(record, 'RELATIONSHIP_HOH') - ftl_months = getattr(record, 'COUNTABLE_MONTH_FED_TIME') + for record, schema, line_num in t5s: + relationship_hoh = getattr(record, "RELATIONSHIP_HOH") + ftl_months = getattr(record, "COUNTABLE_MONTH_FED_TIME") - if (relationship_hoh == '01' or relationship_hoh == '02') and int(ftl_months) >= 60: + if (relationship_hoh == "01" or relationship_hoh == "02") and int(ftl_months) >= 60: passed = True break @@ -319,7 +391,8 @@ def __validate_case_closure_ftl(self, t4, t5s, error_msg): self.__generate_and_add_error( t4_schema, t4_record, - 'COUNTABLE_MONTH_FED_TIME', + "COUNTABLE_MONTH_FED_TIME", + line_num, error_msg ) num_errors += 1 @@ -334,42 +407,42 @@ def __validate_s2_records_are_related(self): with the same RPT_MONTH_YEAR and CASE_NUMBER. 
""" num_errors = 0 - is_ssp = self.program_type == 'SSP' - - t4_model_name = 'M4' if is_ssp else 'T4' - t4_model = self.__get_model(t4_model_name) - t5_model_name = 'M5' if is_ssp else 'T5' - t5_model = self.__get_model(t5_model_name) - - t4s = self.sorted_cases.get(t4_model, []) - t5s = self.sorted_cases.get(t5_model, []) + t4s, t4_model_name, t5s, t5_model_name = self.__get_s2_triplets_and_names() if len(t4s) > 0: if len(t4s) == 1: t4 = t4s[0] - t4_record, t4_schema = t4 - closure_reason = getattr(t4_record, 'CLOSURE_REASON') + t4_record, t4_schema, line_num = t4 + closure_reason = getattr(t4_record, "CLOSURE_REASON") - if closure_reason == '01': + if closure_reason == "01": num_errors += self.__validate_case_closure_employment(t4, t5s, ( - 'At least one person on the case must have employment status = 1:Yes in the ' - 'same RPT_MONTH_YEAR since CLOSURE_REASON = 1:Employment/excess earnings.' + f"At least one person on the case must have " + f"{self.__get_error_context('EMPLOYMENT_STATUS', t5s[0][1] if t5s else None)} = 1:Yes in the " + f"same {self.__get_error_context('RPT_MONTH_YEAR', t4_schema)} since " + f"{self.__get_error_context('CLOSURE_REASON', t4_schema)} = 1:Employment/excess earnings." 
)) - elif closure_reason == '03' and not is_ssp: - num_errors += self.__validate_case_closure_ftl(t4, t5s, - ('At least one person who is head-of-household or ' - 'spouse of head-of-household on case must have ' - 'countable months toward time limit >= 60 since ' - 'CLOSURE_REASON = 03: federal 5 year time limit.')) + elif closure_reason == "03" and not self.is_ssp: + num_errors += self.__validate_case_closure_ftl( + t4, + t5s, + ("At least one person who is head-of-household or " + "spouse of head-of-household on case must have " + f"{self.__get_error_context('COUNTABLE_MONTH_FED_TIME', t5s[0][1] if t5s else None)} >= 60 " + f"since {self.__get_error_context('CLOSURE_REASON', t4_schema)} = 03: " + "federal 5 year time limit.") + ) if len(t5s) == 0: - for record, schema in t4s: + for record, schema, line_num in t4s: self.__generate_and_add_error( schema, record, - field='RPT_MONTH_YEAR', + field="RPT_MONTH_YEAR", + line_num=line_num, msg=( - f'Every {t4_model_name} record should have at least one corresponding ' - f'{t5_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t4_model_name} record should have at least one corresponding " + f"{t5_model_name} record with the same {self.__get_error_context('RPT_MONTH_YEAR', schema)}" + f" and {self.__get_error_context('CASE_NUMBER', schema)}." ) ) num_errors += 1 @@ -377,33 +450,30 @@ def __validate_s2_records_are_related(self): # success pass else: - for record, schema in t5s: + for record, schema, line_num in t5s: self.__generate_and_add_error( schema, record, - field='RPT_MONTH_YEAR', + field="RPT_MONTH_YEAR", + line_num=line_num, msg=( - f'Every {t5_model_name} record should have at least one corresponding ' - f'{t4_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' 
+ f"Every {t5_model_name} record should have at least one corresponding " + f"{t4_model_name} record with the same {self.__get_error_context('RPT_MONTH_YEAR', schema)} " + f"and {self.__get_error_context('CASE_NUMBER', schema)}." ) ) num_errors += 1 return num_errors - def __validate_t5_aabd_and_ssi(self): + def __validate_t5_atd_and_ssi(self): num_errors = 0 - is_ssp = self.program_type == 'SSP' - - t5_model_name = 'M5' if is_ssp else 'T5' - t5_model = self.__get_model(t5_model_name) + t4s, t4_model_name, t5s, t5_model_name = self.__get_s2_triplets_and_names() is_state = self.stt_type == STT.EntityType.STATE is_territory = self.stt_type == STT.EntityType.TERRITORY - t5s = self.sorted_cases.get(t5_model, []) - - for record, schema in t5s: - rec_aabd = getattr(record, 'REC_AID_TOTALLY_DISABLED') + for record, schema, line_num in t5s: + rec_atd = getattr(record, 'REC_AID_TOTALLY_DISABLED') rec_ssi = getattr(record, 'REC_SSI') family_affiliation = getattr(record, 'FAMILY_AFFILIATION') dob = getattr(record, 'DATE_OF_BIRTH') @@ -413,25 +483,27 @@ def __validate_t5_aabd_and_ssi(self): dob_date = datetime.strptime(dob, '%Y%m%d') is_adult = get_years_apart(rpt_date, dob_date) >= 19 - if is_territory and is_adult and (rec_aabd != 1 and rec_aabd != 2): + if is_territory and is_adult and rec_atd not in {1, 2}: self.__generate_and_add_error( schema, record, - field='REC_AID_TOTALLY_DISABLED', + field="REC_AID_TOTALLY_DISABLED", + line_num=line_num, msg=( - f'{t5_model_name} Adults in territories must have a valid ' - 'value for REC_AID_TOTALLY_DISABLED.' + f"{t5_model_name} Adults in territories must have a valid " + f"value for {self.__get_error_context('REC_AID_TOTALLY_DISABLED', schema)}." 
) ) num_errors += 1 - elif is_state and rec_aabd != 2: + elif is_state and rec_atd == 1: self.__generate_and_add_error( schema, record, - field='REC_AID_TOTALLY_DISABLED', + field="REC_AID_TOTALLY_DISABLED", + line_num=line_num, msg=( - f'{t5_model_name} People in states should not have a value ' - 'of 1 for REC_AID_TOTALLY_DISABLED.' + f"{t5_model_name} People in states should not have a value " + f"of 1 for {self.__get_error_context('REC_AID_TOTALLY_DISABLED', schema)}." ) ) num_errors += 1 @@ -440,19 +512,23 @@ def __validate_t5_aabd_and_ssi(self): self.__generate_and_add_error( schema, record, - field='REC_SSI', + field="REC_SSI", + line_num=line_num, msg=( - f'{t5_model_name} People in territories must have value = 2:No for REC_SSI.' + f"{t5_model_name} People in territories must have value = 2:No for " + f"{self.__get_error_context('REC_SSI', schema)}." ) ) num_errors += 1 - elif is_state and family_affiliation == 1: + elif is_state and family_affiliation == 1 and rec_ssi not in {1, 2}: self.__generate_and_add_error( schema, record, - field='REC_SSI', + field="REC_SSI", + line_num=line_num, msg=( - f'{t5_model_name} People in states must have a valid value for REC_SSI.' + f"{t5_model_name} People in states must have a valid value for " + f"{self.__get_error_context('REC_SSI', schema)}." ) ) num_errors += 1 diff --git a/tdrs-backend/tdpservice/parsers/duplicate_manager.py b/tdrs-backend/tdpservice/parsers/duplicate_manager.py index fe8da1992..7de80c2ad 100644 --- a/tdrs-backend/tdpservice/parsers/duplicate_manager.py +++ b/tdrs-backend/tdpservice/parsers/duplicate_manager.py @@ -107,12 +107,14 @@ def __get_partial_dup_error_msg(self, schema, record_type, curr_line_number, exi f"{record_type} at line {curr_line_number}. Record is a partial duplicate of the " f"record at line number {existing_line_number}. 
Duplicated fields causing error: ") for i, name in enumerate(field_names): + field = schema.get_field_by_name(name) + item_and_name = f"Item {field.item} ({field.friendly_name})" if i == len(field_names) - 1 and len(field_names) != 1: - err_msg += f"and {schema.get_field_by_name(name).friendly_name}." + err_msg += f"and {item_and_name}." elif len(field_names) == 1: - err_msg += f"{schema.get_field_by_name(name).friendly_name}." + err_msg += f"{item_and_name}." else: - err_msg += f"{schema.get_field_by_name(name).friendly_name}, " + err_msg += f"{item_and_name}, " return err_msg def add_case_member(self, record, schema, line, line_number): diff --git a/tdrs-backend/tdpservice/parsers/fields.py b/tdrs-backend/tdpservice/parsers/fields.py index 076743096..d26c27bb4 100644 --- a/tdrs-backend/tdpservice/parsers/fields.py +++ b/tdrs-backend/tdpservice/parsers/fields.py @@ -1,7 +1,7 @@ """Datafile field representations.""" import logging -from .validators import value_is_empty +from .validators.util import value_is_empty logger = logging.getLogger(__name__) @@ -18,6 +18,7 @@ def __init__( endIndex, required=True, validators=[], + ignore_errors=False, ): self.item = item self.name = name @@ -27,6 +28,7 @@ def __init__( self.endIndex = endIndex self.required = required self.validators = validators + self.ignore_errors = ignore_errors def create(self, item, name, length, start, end, type): """Create a new field.""" @@ -64,7 +66,7 @@ class TransformField(Field): """Represents a field that requires some transformation before serializing.""" def __init__(self, transform_func, item, name, friendly_name, type, startIndex, endIndex, required=True, - validators=[], **kwargs): + validators=[], ignore_errors=False, **kwargs): super().__init__( item=item, name=name, @@ -73,7 +75,8 @@ def __init__(self, transform_func, item, name, friendly_name, type, startIndex, startIndex=startIndex, endIndex=endIndex, required=required, - validators=validators) + validators=validators, + 
ignore_errors=ignore_errors) self.transform_func = transform_func self.kwargs = kwargs diff --git a/tdrs-backend/tdpservice/parsers/models.py b/tdrs-backend/tdpservice/parsers/models.py index bbf7535cd..f9c5f3c63 100644 --- a/tdrs-backend/tdpservice/parsers/models.py +++ b/tdrs-backend/tdpservice/parsers/models.py @@ -6,6 +6,9 @@ from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from tdpservice.data_files.models import DataFile +import logging + +logger = logging.getLogger(__name__) class ParserErrorCategoryChoices(models.TextChoices): """Enum of ParserError error_type.""" @@ -93,8 +96,29 @@ class Status(models.TextChoices): total_number_of_records_in_file = models.IntegerField(null=True, blank=False, default=0) total_number_of_records_created = models.IntegerField(null=True, blank=False, default=0) + def set_status(self, status): + """Set the status on the summary object.""" + match status: + case DataFileSummary.Status.PENDING: + self.status = DataFileSummary.Status.PENDING + case DataFileSummary.Status.ACCEPTED: + self.status = DataFileSummary.Status.ACCEPTED + case DataFileSummary.Status.ACCEPTED_WITH_ERRORS: + self.status = DataFileSummary.Status.ACCEPTED_WITH_ERRORS + case DataFileSummary.Status.PARTIALLY_ACCEPTED: + self.status = DataFileSummary.Status.PARTIALLY_ACCEPTED + case DataFileSummary.Status.REJECTED: + self.status = DataFileSummary.Status.REJECTED + case _: + logger.warn(f"Unknown status: {status} passed into set_status.") + def get_status(self): """Set and return the status field based on errors and models associated with datafile.""" + # Because we introduced a setter for the status for exception handling, we need to + # check if it has been set before determining a status based on the queries below. 
+ if self.status != DataFileSummary.Status.PENDING: + return self.status + errors = ParserError.objects.filter(file=self.datafile) # excluding row-level pre-checks and trailer pre-checks. diff --git a/tdrs-backend/tdpservice/parsers/parse.py b/tdrs-backend/tdpservice/parsers/parse.py index 8c17a7087..a81fc397c 100644 --- a/tdrs-backend/tdpservice/parsers/parse.py +++ b/tdrs-backend/tdpservice/parsers/parse.py @@ -2,17 +2,18 @@ from django.conf import settings -from django.contrib.admin.models import LogEntry, ADDITION -from django.contrib.contenttypes.models import ContentType +from django.db.utils import DatabaseError +from elasticsearch.exceptions import ElasticsearchException import itertools import logging -from .models import ParserErrorCategoryChoices, ParserError -from . import schema_defs, validators, util -from . import row_schema -from .schema_defs.utils import get_section_reference, get_program_model -from .case_consistency_validator import CaseConsistencyValidator -from elasticsearch.exceptions import ElasticsearchException -from tdpservice.data_files.models import DataFile +from tdpservice.parsers.models import ParserErrorCategoryChoices, ParserError +from tdpservice.parsers import row_schema, schema_defs, util +from tdpservice.parsers.validators import category1 +from tdpservice.parsers.validators.util import value_is_empty +from tdpservice.parsers.schema_defs.utils import get_section_reference, get_program_model +from tdpservice.parsers.case_consistency_validator import CaseConsistencyValidator +from tdpservice.parsers.util import log_parser_exception +from tdpservice.search_indexes.models.reparse_meta import ReparseMeta logger = logging.getLogger(__name__) @@ -33,12 +34,17 @@ def parse_datafile(datafile, dfs): logger.info(f"Preparser Error: {len(header_errors)} header errors encountered.") errors['header'] = header_errors bulk_create_errors({1: header_errors}, 1, flush=True) + update_meta_model(datafile, dfs) return errors + elif header_is_valid and 
len(header_errors) > 0: + logger.info(f"Preparser Warning: {len(header_errors)} header warnings encountered.") + errors['header'] = header_errors + bulk_create_errors({1: header_errors}, 1, flush=True) field_values = schema_defs.header.get_field_values_by_names(header_line, {"encryption", "tribe_code", "state_fips"}) is_encrypted = field_values["encryption"] == "E" - is_tribal = not validators.value_is_empty(field_values["tribe_code"], 3, extra_vals={'0'*3}) + is_tribal = not value_is_empty(field_values["tribe_code"], 3, extra_vals={'0'*3}) logger.debug(f"Datafile has encrypted fields: {is_encrypted}.") logger.debug(f"Datafile: {datafile.__repr__()}, is Tribal: {is_tribal}.") @@ -57,20 +63,23 @@ def parse_datafile(datafile, dfs): # Validate tribe code in submission across program type and fips code generate_error = util.make_generate_parser_error(datafile, 1) - tribe_is_valid, tribe_error = validators.validate_tribe_fips_program_agree(header['program_type'], - field_values["tribe_code"], - field_values["state_fips"], - generate_error) + tribe_is_valid, tribe_error = category1.validate_tribe_fips_program_agree( + header['program_type'], + field_values["tribe_code"], + field_values["state_fips"], + generate_error + ) if not tribe_is_valid: logger.info(f"Tribe Code ({field_values['tribe_code']}) inconsistency with Program Type " + f"({header['program_type']}) and FIPS Code ({field_values['state_fips']}).",) errors['header'] = [tribe_error] bulk_create_errors({1: [tribe_error]}, 1, flush=True) + update_meta_model(datafile, dfs) return errors # Ensure file section matches upload section - section_is_valid, section_error = validators.validate_header_section_matches_submission( + section_is_valid, section_error = category1.validate_header_section_matches_submission( datafile, get_section_reference(program_type, section), util.make_generate_parser_error(datafile, 1) @@ -81,9 +90,10 @@ def parse_datafile(datafile, dfs): errors['document'] = [section_error] 
unsaved_parser_errors = {1: [section_error]} bulk_create_errors(unsaved_parser_errors, 1, flush=True) + update_meta_model(datafile, dfs) return errors - rpt_month_year_is_valid, rpt_month_year_error = validators.validate_header_rpt_month_year( + rpt_month_year_is_valid, rpt_month_year_error = category1.validate_header_rpt_month_year( datafile, header, util.make_generate_parser_error(datafile, 1) @@ -93,6 +103,7 @@ def parse_datafile(datafile, dfs): errors['document'] = [rpt_month_year_error] unsaved_parser_errors = {1: [rpt_month_year_error]} bulk_create_errors(unsaved_parser_errors, 1, flush=True) + update_meta_model(datafile, dfs) return errors line_errors = parse_datafile_lines(datafile, dfs, program_type, section, is_encrypted, case_consistency_validator) @@ -101,6 +112,11 @@ def parse_datafile(datafile, dfs): return errors +def update_meta_model(datafile, dfs): + """Update appropriate meta models.""" + ReparseMeta.increment_records_created(datafile.reparse_meta_models, dfs.total_number_of_records_created) + ReparseMeta.increment_files_completed(datafile.reparse_meta_models) + def bulk_create_records(unsaved_records, line_number, header_count, datafile, dfs, flush=False): """Bulk create passed in records.""" batch_size = settings.BULK_CREATE_BATCH_SIZE @@ -116,18 +132,22 @@ def bulk_create_records(unsaved_records, line_number, header_count, datafile, df num_db_records_created += len(created_objs) num_elastic_records_created += document.update(created_objs)[0] except ElasticsearchException as e: - logger.error(f"Encountered error while indexing datafile documents: {e}") - LogEntry.objects.log_action( - user_id=datafile.user.pk, - content_type_id=ContentType.objects.get_for_model(DataFile).pk, - object_id=datafile, - object_repr=f"Datafile id: {datafile.pk}; year: {datafile.year}, quarter: {datafile.quarter}", - action_flag=ADDITION, - change_message=f"Encountered error while indexing datafile documents: {e}", - ) + log_parser_exception(datafile, + f"Encountered 
error while indexing datafile documents: \n{e}", + "error" + ) continue + except DatabaseError as e: + log_parser_exception(datafile, + f"Encountered error while creating database records: \n{e}", + "error" + ) + return False except Exception as e: - logger.error(f"Encountered error while creating datafile records: {e}") + log_parser_exception(datafile, + f"Encountered generic exception while creating database records: \n{e}", + "error" + ) return False dfs.total_number_of_records_created += num_db_records_created @@ -187,30 +207,28 @@ def rollback_records(unsaved_records, datafile): except ElasticsearchException as e: # Caught an Elastic exception, to ensure the quality of the DB, we will force the DB deletion and let # Elastic clean up later. - logger.error("Encountered an Elastic exception, enforcing DB cleanup.") - logger.error(f"Elastic Error: {e}") - LogEntry.objects.log_action( - user_id=datafile.user.pk, - content_type_id=ContentType.objects.get_for_model(DataFile).pk, - object_id=datafile, - object_repr=f"Datafile id: {datafile.pk}; year: {datafile.year}, quarter: {datafile.quarter}", - action_flag=ADDITION, - change_message=f"Encountered error while indexing datafile documents: {e}", - ) + log_parser_exception(datafile, + f"Encountered error while indexing datafile documents: \n{e}", + "error" + ) + logger.warn("Encountered an Elastic exception, enforcing DB cleanup.") num_deleted, models = qset.delete() logger.info("Succesfully performed DB cleanup after elastic failure.") + log_parser_exception(datafile, + "Succesfully performed DB cleanup after elastic failure.", + "info" + ) + except DatabaseError as e: + log_parser_exception(datafile, + (f"Encountered error while deleting database records for model: {model}. " + f"Exception: \n{e}"), + "error" + ) except Exception as e: - logging.critical(f"Encountered error while deleting records of type {model}. NO RECORDS DELETED! 
" - f"Error message: {e}") - LogEntry.objects.log_action( - user_id=datafile.user.pk, - content_type_id=ContentType.objects.get_for_model(DataFile).pk, - object_id=datafile, - object_repr=f"Datafile id: {datafile.pk}; year: {datafile.year}, quarter: {datafile.quarter}", - action_flag=ADDITION, - change_message=f"Encountered error while deleting records of type {model}. NO RECORDS DELETED! " - f"Error message: {e}" - ) + log_parser_exception(datafile, + f"Encountered generic exception while trying to rollback records. Exception: \n{e}", + "error" + ) def rollback_parser_errors(datafile): """Delete created errors in the event of a failure.""" @@ -220,9 +238,18 @@ def rollback_parser_errors(datafile): # WARNING: we can use `_raw_delete` in this case because our error models don't have cascading dependencies. If # that ever changes, we should NOT use `_raw_delete`. num_deleted = qset._raw_delete(qset.db) - logger.debug(f"Deleted {num_deleted} {ParserError}.") + logger.debug(f"Deleted {num_deleted} ParserErrors.") + except DatabaseError as e: + log_parser_exception(datafile, + ("Encountered error while deleting database records for ParserErrors. " + f"Exception: \n{e}"), + "error" + ) except Exception as e: - logging.error(f"Encountered error while deleting records of type {ParserError}. Error message: {e}") + log_parser_exception(datafile, + f"Encountered generic exception while rolling back ParserErrors. Exception: \n{e}.", + "error" + ) def validate_case_consistency(case_consistency_validator): """Force category four validation if we have reached the last case in the file.""" @@ -274,34 +301,30 @@ def delete_serialized_records(duplicate_manager, dfs): except ElasticsearchException as e: # Caught an Elastic exception, to ensure the quality of the DB, we will force the DB deletion and let # Elastic clean up later. 
- logger.error("Encountered an Elastic exception, enforcing DB cleanup.") - logger.error(f"Elastic Error: {e}") - datafile = dfs.datafile - LogEntry.objects.log_action( - user_id=datafile.user.pk, - content_type_id=ContentType.objects.get_for_model(DataFile).pk, - object_id=datafile, - object_repr=f"Datafile id: {datafile.pk}; year: {datafile.year}, quarter: {datafile.quarter}", - action_flag=ADDITION, - change_message=f"Encountered error while indexing datafile documents: {e}", - ) + log_parser_exception(dfs.datafile, + ("Encountered error while indexing datafile documents. Enforcing DB cleanup. " + f"Exception: \n{e}"), + "error" + ) num_deleted, models = qset.delete() total_deleted += num_deleted dfs.total_number_of_records_created -= num_deleted - logger.info("Succesfully performed DB cleanup after elastic failure.") + log_parser_exception(dfs.datafile, + "Succesfully performed DB cleanup after elastic failure.", + "info" + ) + except DatabaseError as e: + log_parser_exception(dfs.datafile, + (f"Encountered error while deleting database records for model {model}. " + f"Exception: \n{e}"), + "error" + ) except Exception as e: - logging.critical(f"Encountered error while deleting records of type {model}. NO RECORDS DELETED! " - f"Error message: {e}") - datafile = dfs.datafile - LogEntry.objects.log_action( - user_id=datafile.user.pk, - content_type_id=ContentType.objects.get_for_model(DataFile).pk, - object_id=datafile, - object_repr=f"Datafile id: {datafile.pk}; year: {datafile.year}, quarter: {datafile.quarter}", - action_flag=ADDITION, - change_message=f"Encountered error while deleting records of type {model}. NO RECORDS DELETED! " - f"Error message: {e}" - ) + log_parser_exception(dfs.datafile, + (f"Encountered generic exception while deleting records of type {model}. 
" + f"Exception: \n{e}"), + "error" + ) if total_deleted: logger.info(f"Deleted a total of {total_deleted} records from the DB because of case consistenecy errors.") @@ -363,6 +386,7 @@ def parse_datafile_lines(datafile, dfs, program_type, section, is_encrypted, cas rollback_records(unsaved_records.get_bulk_create_struct(), datafile) rollback_parser_errors(datafile) bulk_create_errors(preparse_error, num_errors, flush=True) + update_meta_model(datafile, dfs) return errors if prev_sum != header_count + trailer_count: @@ -425,6 +449,7 @@ def parse_datafile_lines(datafile, dfs, program_type, section, is_encrypted, cas rollback_parser_errors(datafile) preparse_error = {line_number: [err_obj]} bulk_create_errors(preparse_error, num_errors, flush=True) + update_meta_model(datafile, dfs) return errors should_remove = validate_case_consistency(case_consistency_validator) @@ -445,6 +470,7 @@ def parse_datafile_lines(datafile, dfs, program_type, section, is_encrypted, cas logger.error(f"Not all parsed records created for file: {datafile.id}!") rollback_records(unsaved_records.get_bulk_create_struct(), datafile) bulk_create_errors(unsaved_parser_errors, num_errors, flush=True) + update_meta_model(datafile, dfs) return errors # Add any generated cat4 errors to our error data structure & clear our caches errors list @@ -461,6 +487,8 @@ def parse_datafile_lines(datafile, dfs, program_type, section, is_encrypted, cas f"validated {case_consistency_validator.total_cases_validated} of them.") dfs.save() + update_meta_model(datafile, dfs) + return errors @@ -473,8 +501,7 @@ def manager_parse_line(line, schema_manager, generate_error, datafile, is_encryp schema_manager.update_encrypted_fields(is_encrypted) records = schema_manager.parse_and_validate(line, generate_error) return records - except AttributeError as e: - logger.error(e) + except AttributeError: return [(None, False, [ generate_error( schema=None, diff --git a/tdrs-backend/tdpservice/parsers/row_schema.py 
b/tdrs-backend/tdpservice/parsers/row_schema.py index 7dd01556f..7c93298f1 100644 --- a/tdrs-backend/tdpservice/parsers/row_schema.py +++ b/tdrs-backend/tdpservice/parsers/row_schema.py @@ -1,7 +1,9 @@ """Row schema for datafile.""" from .models import ParserErrorCategoryChoices from .fields import Field, TransformField -from .validators import value_is_empty, format_error_context, ValidationErrorArgs +from .validators.util import value_is_empty, ValidationErrorArgs +from .validators.category2 import format_error_context +from .util import get_record_value_by_field_name import logging logger = logging.getLogger(__name__) @@ -59,7 +61,7 @@ def parse_and_validate(self, line, generate_error): ) is_quiet_preparser_errors = ( self.quiet_preparser_errors - if type(self.quiet_preparser_errors) == bool + if type(self.quiet_preparser_errors) is bool else self.quiet_preparser_errors(line) ) if not preparsing_is_valid: @@ -87,13 +89,21 @@ def run_preparsing_validators(self, line, generate_error): is_valid = True errors = [] + field = self.get_field_by_name('RecordType') + for validator in self.preparsing_validators: - validator_is_valid, validator_error = validator(line, self, "record type", "0") + eargs = ValidationErrorArgs( + value=line, + row_schema=self, + friendly_name=field.friendly_name if field else 'record type', + item_num=field.item if field else '0', + ) + validator_is_valid, validator_error = validator(line, eargs) is_valid = False if not validator_is_valid else is_valid is_quiet_preparser_errors = ( self.quiet_preparser_errors - if type(self.quiet_preparser_errors) == bool + if type(self.quiet_preparser_errors) is bool else self.quiet_preparser_errors(line) ) if validator_error and not is_quiet_preparser_errors: @@ -130,18 +140,20 @@ def run_field_validators(self, instance, generate_error): errors = [] for field in self.fields: - value = None - if isinstance(instance, dict): - value = instance.get(field.name, None) - else: - value = getattr(instance, 
field.name, None) + value = get_record_value_by_field_name(instance, field.name) + eargs = ValidationErrorArgs( + value=value, + row_schema=self, + friendly_name=field.friendly_name, + item_num=field.item, + ) is_empty = value_is_empty(value, field.endIndex-field.startIndex) should_validate = not field.required and not is_empty if (field.required and not is_empty) or should_validate: for validator in field.validators: - validator_is_valid, validator_error = validator(value, self, field.friendly_name, field.item) - is_valid = False if not validator_is_valid else is_valid + validator_is_valid, validator_error = validator(value, eargs) + is_valid = False if (not validator_is_valid and not field.ignore_errors) else is_valid if validator_error: errors.append( generate_error( @@ -154,14 +166,6 @@ def run_field_validators(self, instance, generate_error): ) elif field.required: is_valid = False - eargs = ValidationErrorArgs( - value=value, - row_schema=self, - friendly_name=field.friendly_name, - item_num=field.item, - error_context_format='prefix' - ) - errors.append( generate_error( schema=self, diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/header.py b/tdrs-backend/tdpservice/parsers/schema_defs/header.py index 67475fd5f..d0bb033f2 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/header.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/header.py @@ -3,16 +3,17 @@ from ..fields import Field from ..row_schema import RowSchema -from .. import validators +from tdpservice.parsers.validators import category1, category2 header = RowSchema( record_type="HEADER", document=None, preparsing_validators=[ - validators.recordHasLength(23), - validators.startsWith("HEADER", - lambda value: f"Your file does not begin with a {value} record."), + category1.recordHasLength(23), + category1.recordStartsWith( + "HEADER", lambda _: "Your file does not begin with a HEADER record." 
+ ), ], postparsing_validators=[], fields=[ @@ -25,7 +26,7 @@ endIndex=6, required=True, validators=[ - validators.matches("HEADER"), + category2.isEqual("HEADER"), ], ), Field( @@ -36,7 +37,7 @@ startIndex=6, endIndex=10, required=True, - validators=[validators.isInLimits(2000, 2099)], + validators=[category2.isBetween(2000, 2099, inclusive=True)], ), Field( item="5", @@ -46,7 +47,7 @@ startIndex=10, endIndex=11, required=True, - validators=[validators.oneOf(["1", "2", "3", "4"])], + validators=[category2.isOneOf(["1", "2", "3", "4"])], ), Field( item="6", @@ -56,7 +57,7 @@ startIndex=11, endIndex=12, required=True, - validators=[validators.oneOf(["A", "C", "G", "S"])], + validators=[category2.isOneOf(["A", "C", "G", "S"])], ), Field( item="1", @@ -67,11 +68,13 @@ endIndex=14, required=False, validators=[ - validators.oneOf(["00", "01", "02", "04", "05", "06", "08", "09", "10", "11", "12", "13", - "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", - "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", - "37", "38", "39", "40", "41", "42", "44", "45", "46", "47", "48", - "49", "50", "51", "53", "54", "55", "56", "66", "72", "78"]), + category2.isOneOf([ + "00", "01", "02", "04", "05", "06", "08", "09", "10", "11", "12", "13", + "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", + "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", + "37", "38", "39", "40", "41", "42", "44", "45", "46", "47", "48", + "49", "50", "51", "53", "54", "55", "56", "66", "72", "78" + ]), ], ), Field( @@ -82,7 +85,7 @@ startIndex=14, endIndex=17, required=False, - validators=[validators.isInStringRange(0, 999)], + validators=[category2.isBetween(0, 999, inclusive=True, cast=int)], ), Field( item="7", @@ -92,7 +95,7 @@ startIndex=17, endIndex=20, required=True, - validators=[validators.oneOf(["TAN", "SSP"])], + validators=[category2.isOneOf(["TAN", "SSP"])], ), Field( item="8", @@ -102,7 +105,7 @@ startIndex=20, endIndex=21, 
required=True, - validators=[validators.oneOf(["1", "2"])], + validators=[category2.isOneOf(["1", "2"])], ), Field( item="9", @@ -112,7 +115,7 @@ startIndex=21, endIndex=22, required=False, - validators=[validators.oneOf([" ", "E"])], + validators=[category2.isOneOf([" ", "E"])], ), Field( item="10", @@ -122,11 +125,8 @@ startIndex=22, endIndex=23, required=True, - validators=[validators.matches("D", - error_func=lambda eargs: ("HEADER Update Indicator must be set to D " - f"instead of {eargs.value}. Please review " - "Exporting Complete Data Using FTANF in the " - "Knowledge Center."))], + validators=[category2.validateHeaderUpdateIndicator()], + ignore_errors=True, ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m1.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m1.py index 48e4e7dac..81b406532 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m1.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m1.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.ssp import SSP_M1DataSubmissionDocument from tdpservice.parsers.util import generate_t1_t4_hashes, get_t1_t4_partial_hash_members @@ -15,87 +15,87 @@ generate_hashes_func=generate_t1_t4_hashes, get_partial_hash_members_func=get_t1_t4_partial_hash_members, preparsing_validators=[ - validators.recordHasLengthBetween(113, 150), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLengthBetween(113, 150), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + 
category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='CASH_AMOUNT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='NBR_MONTHS', - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='CC_AMOUNT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='CHILDREN_COVERED', - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='CC_AMOUNT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='CC_NBR_MONTHS', - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='TRANSP_AMOUNT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='TRANSP_NBR_MONTHS', - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='SANC_REDUCTION_AMT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='WORK_REQ_SANCTION', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='SANC_REDUCTION_AMT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='SANC_TEEN_PARENT', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), 
), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='SANC_REDUCTION_AMT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='NON_COOPERATION_CSE', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='SANC_REDUCTION_AMT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='FAILURE_TO_COMPLY', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='SANC_REDUCTION_AMT', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='OTHER_SANCTION', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='OTHER_TOTAL_REDUCTIONS', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='FAMILY_CAP', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='OTHER_TOTAL_REDUCTIONS', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='REDUCTIONS_ON_RECEIPTS', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='OTHER_TOTAL_REDUCTIONS', - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name='OTHER_NON_SANCTION', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - 
validators.sumIsLarger([ + category3.sumIsLarger([ "AMT_FOOD_STAMP_ASSISTANCE", "AMT_SUB_CC", "CASH_AMOUNT", @@ -122,8 +122,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ] ), Field( @@ -134,7 +134,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()] + validators=[category2.isNotEmpty()] ), TransformField( zero_pad(3), @@ -145,7 +145,7 @@ startIndex=19, endIndex=22, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], ), Field( item="4", @@ -155,7 +155,7 @@ startIndex=22, endIndex=24, required=False, - validators=[validators.isInStringRange(0, 99),] + validators=[category2.isBetween(0, 99, inclusive=True, cast=int),] ), Field( item="6", @@ -165,7 +165,7 @@ startIndex=24, endIndex=29, required=True, - validators=[validators.isNumber(),] + validators=[category2.isNumber(),] ), Field( item="7", @@ -175,7 +175,7 @@ startIndex=29, endIndex=30, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="8", @@ -185,7 +185,7 @@ startIndex=30, endIndex=32, required=True, - validators=[validators.isInLimits(1, 99),] + validators=[category2.isBetween(1, 99, inclusive=True),] ), Field( item="9", @@ -195,7 +195,7 @@ startIndex=32, endIndex=33, required=True, - validators=[validators.isInLimits(1, 3),] + validators=[category2.isBetween(1, 3, inclusive=True),] ), Field( item="10", @@ -205,7 +205,7 @@ startIndex=33, endIndex=34, required=True, - validators=[validators.isInLimits(1, 3),] + validators=[category2.isBetween(1, 3, inclusive=True),] ), Field( item="11", @@ -215,7 +215,7 @@ startIndex=34, endIndex=35, required=True, - validators=[validators.isInLimits(1, 2),] + validators=[category2.isBetween(1, 2, inclusive=True),] ), Field( item="12", @@ -225,7 +225,7 @@ startIndex=35, endIndex=36, required=True, - 
validators=[validators.isInLimits(1, 2),] + validators=[category2.isBetween(1, 2, inclusive=True),] ), Field( item="13", @@ -235,7 +235,7 @@ startIndex=36, endIndex=37, required=False, - validators=[validators.isInLimits(0, 2),] + validators=[category2.isBetween(0, 2, inclusive=True),] ), Field( item="14", @@ -245,7 +245,7 @@ startIndex=37, endIndex=41, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="15", @@ -255,7 +255,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.isInLimits(0, 2),] + validators=[category2.isBetween(0, 2, inclusive=True),] ), Field( item="16", @@ -265,7 +265,7 @@ startIndex=42, endIndex=46, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="17", @@ -275,7 +275,7 @@ startIndex=46, endIndex=50, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="18", @@ -285,7 +285,7 @@ startIndex=50, endIndex=54, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="19A", @@ -295,7 +295,7 @@ startIndex=54, endIndex=58, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="19B", @@ -305,7 +305,7 @@ startIndex=58, endIndex=61, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="20A", @@ -315,7 +315,7 @@ startIndex=61, endIndex=65, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="20B", @@ -325,7 +325,7 @@ startIndex=65, endIndex=67, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + 
validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="20C", @@ -335,7 +335,7 @@ startIndex=67, endIndex=70, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="21A", @@ -345,7 +345,7 @@ startIndex=70, endIndex=74, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="21B", @@ -355,7 +355,7 @@ startIndex=74, endIndex=77, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="22A", @@ -365,7 +365,7 @@ startIndex=77, endIndex=81, required=False, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="22B", @@ -375,7 +375,7 @@ startIndex=81, endIndex=84, required=False, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="23A", @@ -385,7 +385,7 @@ startIndex=84, endIndex=88, required=False, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="23B", @@ -395,7 +395,7 @@ startIndex=88, endIndex=91, required=False, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="24AI", @@ -405,7 +405,7 @@ startIndex=91, endIndex=95, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="24AII", @@ -415,7 +415,7 @@ startIndex=95, endIndex=96, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24AIII", @@ -425,7 +425,7 @@ startIndex=96, endIndex=97, required=False, - validators=[validators.isInLimits(0, 9),] + validators=[category2.isBetween(0, 9, inclusive=True),] ), 
Field( item="24AIV", @@ -435,7 +435,7 @@ startIndex=97, endIndex=98, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24AV", @@ -445,7 +445,7 @@ startIndex=98, endIndex=99, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24AVI", @@ -455,7 +455,7 @@ startIndex=99, endIndex=100, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24AVII", @@ -465,7 +465,7 @@ startIndex=100, endIndex=101, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24B", @@ -475,7 +475,7 @@ startIndex=101, endIndex=105, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="24CI", @@ -485,7 +485,7 @@ startIndex=105, endIndex=109, required=True, - validators=[validators.isLargerThanOrEqualTo(0),] + validators=[category2.isGreaterThan(0, inclusive=True),] ), Field( item="24CII", @@ -495,7 +495,7 @@ startIndex=109, endIndex=110, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24CIII", @@ -505,7 +505,7 @@ startIndex=110, endIndex=111, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="24CIV", @@ -515,7 +515,7 @@ startIndex=111, endIndex=112, required=True, - validators=[validators.oneOf([1, 2]),] + validators=[category2.isOneOf([1, 2]),] ), Field( item="25", @@ -525,7 +525,7 @@ startIndex=112, endIndex=113, required=False, - validators=[validators.isInLimits(0, 9),] + validators=[category2.isBetween(0, 9, inclusive=True),] ), Field( item="-1", diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m2.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m2.py index 0f9f408c4..82d5c2c46 100644 --- 
a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m2.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m2.py @@ -4,7 +4,7 @@ from tdpservice.parsers.transforms import ssp_ssn_decryption_func from tdpservice.parsers.fields import TransformField, Field from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.ssp import SSP_M2DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,116 +18,116 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {3, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.recordHasLength(150), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLength(150), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.validate__FAM_AFF__SSN(), - validators.if_then_validator( + category3.validate__FAM_AFF__SSN(), + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='SSN', - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='RACE_HISPANIC', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - 
validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='RACE_AMER_INDIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='RACE_ASIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='RACE_BLACK', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='RACE_HAWAIIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='RACE_WHITE', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), 
result_field_name='MARITAL_STATUS', - result_function=validators.isInLimits(1, 5), + result_function=category3.isBetween(1, 5, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 2), + condition_function=category3.isBetween(1, 2, inclusive=True), result_field_name='PARENT_MINOR_CHILD', - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='EDUCATION_LEVEL', - result_function=validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99), - ), + result_function=category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='CITIZENSHIP_STATUS', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='COOPERATION_CHILD_SUPPORT', - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name='EMPLOYMENT_STATUS', - 
result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='WORK_ELIGIBLE_INDICATOR', - result_function=validators.or_validators( - validators.isInLimits(1, 9), - validators.oneOf((11, 12)) - ), + result_function=category3.orValidators([ + category3.isBetween(1, 9, inclusive=True), + category3.isOneOf((11, 12)) + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='WORK_PART_STATUS', - result_function=validators.oneOf([1, 2, 5, 7, 9, 15, 16, 17, 18, 99]), + result_function=category3.isOneOf([1, 2, 5, 7, 9, 15, 16, 17, 18, 99]), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='WORK_ELIGIBLE_INDICATOR', - condition_function=validators.isInLimits(1, 5), + condition_function=category3.isBetween(1, 5, inclusive=True), result_field_name='WORK_PART_STATUS', - result_function=validators.notMatches(99), + result_function=category3.isNotEqual(99), ), ], fields=[ @@ -150,8 +150,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ] ), Field( @@ -162,7 +162,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()] + validators=[category2.isNotEmpty()] ), Field( item="26", @@ -172,7 +172,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.oneOf([1, 2, 3, 5])] + validators=[category2.isOneOf([1, 2, 3, 5])] ), Field( item="27", @@ -182,7 +182,7 @@ startIndex=20, endIndex=21, required=True, - validators=[validators.oneOf([1, 2])] + 
validators=[category2.isOneOf([1, 2])] ), Field( item="28", @@ -192,10 +192,10 @@ startIndex=21, endIndex=29, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid()] + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid()] ), TransformField( transform_func=ssp_ssn_decryption_func, @@ -206,7 +206,7 @@ startIndex=29, endIndex=38, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False ), Field( @@ -217,7 +217,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="30B", @@ -227,7 +227,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="30C", @@ -237,7 +237,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="30D", @@ -247,7 +247,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="30E", @@ -257,7 +257,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="30F", @@ -267,7 +267,7 @@ startIndex=43, endIndex=44, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="31", @@ -277,7 +277,7 @@ startIndex=44, endIndex=45, required=True, - validators=[validators.isLargerThanOrEqualTo(0)] + validators=[category2.isGreaterThan(0, inclusive=True)] ), Field( item="32A", @@ -287,7 +287,7 @@ startIndex=45, endIndex=46, 
required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="32B", @@ -297,7 +297,7 @@ startIndex=46, endIndex=47, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="32C", @@ -307,7 +307,7 @@ startIndex=47, endIndex=48, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="32D", @@ -317,7 +317,7 @@ startIndex=48, endIndex=49, required=False, - validators=[validators.isLargerThanOrEqualTo(0)] + validators=[category2.isGreaterThan(0)] ), Field( item="32E", @@ -327,7 +327,7 @@ startIndex=49, endIndex=50, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="33", @@ -337,7 +337,7 @@ startIndex=50, endIndex=51, required=False, - validators=[validators.isInLimits(0, 5)] + validators=[category2.isBetween(0, 5, inclusive=True)] ), Field( item="34", @@ -347,7 +347,7 @@ startIndex=51, endIndex=53, required=True, - validators=[validators.isInStringRange(1, 10)] + validators=[category2.isBetween(1, 10, inclusive=True, cast=int)] ), Field( item="35", @@ -357,7 +357,7 @@ startIndex=53, endIndex=54, required=False, - validators=[validators.isInLimits(0, 3)] + validators=[category2.isBetween(0, 3, inclusive=True)] ), Field( item="36", @@ -367,7 +367,7 @@ startIndex=54, endIndex=55, required=False, - validators=[validators.isInLimits(0, 9)] + validators=[category2.isBetween(0, 9, inclusive=True)] ), Field( item="37", @@ -378,9 +378,10 @@ endIndex=57, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(1, 16), validators.isInStringRange(98, 99) - ), + category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int) + ]), ] ), Field( @@ -391,7 +392,7 @@ startIndex=57, endIndex=58, required=False, - validators=[validators.oneOf([1, 2, 3, 9])] + 
validators=[category2.isOneOf([1, 2, 3, 9])] ), Field( item="39", @@ -401,7 +402,7 @@ startIndex=58, endIndex=59, required=False, - validators=[validators.oneOf([1, 2, 9])] + validators=[category2.isOneOf([1, 2, 9])] ), Field( item="40", @@ -411,7 +412,7 @@ startIndex=59, endIndex=60, required=False, - validators=[validators.isInLimits(0, 3)] + validators=[category2.isBetween(0, 3, inclusive=True)] ), Field( item="41", @@ -422,11 +423,11 @@ endIndex=62, required=True, validators=[ - validators.or_validators( - validators.isInLimits(1, 4), - validators.isInLimits(6, 9), - validators.isInLimits(11, 12), - ) + category3.orValidators([ + category3.isBetween(1, 4, inclusive=True), + category3.isBetween(6, 9, inclusive=True), + category3.isBetween(11, 12, inclusive=True), + ]) ] ), Field( @@ -437,7 +438,7 @@ startIndex=62, endIndex=64, required=False, - validators=[validators.oneOf([1, 2, 5, 7, 9, 15, 16, 17, 18, 19, 99])] + validators=[category2.isOneOf([1, 2, 5, 7, 9, 15, 16, 17, 18, 19, 99])] ), Field( item="43", @@ -447,7 +448,7 @@ startIndex=64, endIndex=66, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="44", @@ -457,7 +458,7 @@ startIndex=66, endIndex=68, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="45", @@ -467,7 +468,7 @@ startIndex=68, endIndex=70, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="46A", @@ -477,7 +478,7 @@ startIndex=70, endIndex=72, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="46B", @@ -487,7 +488,7 @@ startIndex=72, endIndex=74, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="46C", @@ -497,7 +498,7 @@ startIndex=74, 
endIndex=76, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="47", @@ -507,7 +508,7 @@ startIndex=76, endIndex=78, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="48A", @@ -517,7 +518,7 @@ startIndex=78, endIndex=80, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="48B", @@ -527,7 +528,7 @@ startIndex=80, endIndex=82, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="48C", @@ -537,7 +538,7 @@ startIndex=82, endIndex=84, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="49A", @@ -547,7 +548,7 @@ startIndex=84, endIndex=86, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="49B", @@ -557,7 +558,7 @@ startIndex=86, endIndex=88, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="49C", @@ -567,7 +568,7 @@ startIndex=88, endIndex=90, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="50A", @@ -577,7 +578,7 @@ startIndex=90, endIndex=92, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="50B", @@ -587,7 +588,7 @@ startIndex=92, endIndex=94, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="50C", @@ -597,7 +598,7 @@ startIndex=94, endIndex=96, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, 
inclusive=True)] ), Field( item="51A", @@ -607,7 +608,7 @@ startIndex=96, endIndex=98, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="51B", @@ -617,7 +618,7 @@ startIndex=98, endIndex=100, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="51C", @@ -627,7 +628,7 @@ startIndex=100, endIndex=102, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="52A", @@ -638,7 +639,7 @@ startIndex=102, endIndex=104, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="52B", @@ -649,7 +650,7 @@ startIndex=104, endIndex=106, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="52C", @@ -660,7 +661,7 @@ startIndex=106, endIndex=108, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="53A", @@ -671,7 +672,7 @@ startIndex=108, endIndex=110, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="53B", @@ -682,7 +683,7 @@ startIndex=110, endIndex=112, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="53C", @@ -693,7 +694,7 @@ startIndex=112, endIndex=114, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="54A", @@ -704,7 +705,7 @@ startIndex=114, endIndex=116, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="54B", @@ -715,7 +716,7 @@ startIndex=116, endIndex=118, 
required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="54C", @@ -726,7 +727,7 @@ startIndex=118, endIndex=120, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="55", @@ -736,7 +737,7 @@ startIndex=120, endIndex=122, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="56", @@ -746,7 +747,7 @@ startIndex=122, endIndex=124, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="57", @@ -756,7 +757,7 @@ startIndex=124, endIndex=126, required=False, - validators=[validators.isInLimits(0, 99)] + validators=[category2.isBetween(0, 99, inclusive=True)] ), Field( item="58", @@ -766,7 +767,7 @@ startIndex=126, endIndex=130, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="59A", @@ -776,7 +777,7 @@ startIndex=130, endIndex=134, required=False, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="59B", @@ -786,7 +787,7 @@ startIndex=134, endIndex=138, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="59C", @@ -796,7 +797,7 @@ startIndex=138, endIndex=142, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="59D", @@ -806,7 +807,7 @@ startIndex=142, endIndex=146, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="59E", @@ -816,7 +817,7 @@ startIndex=146, endIndex=150, required=True, - validators=[validators.isInLimits(0, 9999)] + 
validators=[category2.isBetween(0, 9999, inclusive=True)] ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m3.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m3.py index 9705145c9..6f44c551e 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m3.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m3.py @@ -4,7 +4,8 @@ from tdpservice.parsers.transforms import ssp_ssn_decryption_func from tdpservice.parsers.fields import TransformField, Field from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 +from tdpservice.parsers.validators.util import is_quiet_preparser_errors from tdpservice.search_indexes.documents.ssp import SSP_M3DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,86 +19,86 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {2, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.t3_m3_child_validator(FIRST_CHILD), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), - ]), - validators.notEmpty(8, 19) + category1.t3_m3_child_validator(FIRST_CHILD), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), + ]), + category1.recordIsNotEmpty(8, 19) ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='SSN', - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + 
category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_HISPANIC', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_AMER_INDIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_ASIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_BLACK', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_HAWAIIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_WHITE', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), 
- validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RELATIONSHIP_HOH', - result_function=validators.isInLimits(4, 9), + result_function=category3.isBetween(4, 9, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='PARENT_MINOR_CHILD', - result_function=validators.oneOf((1, 2, 3)), + result_function=category3.isOneOf((1, 2, 3)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='EDUCATION_LEVEL', - result_function=validators.notMatches(99), + result_function=category3.isNotEqual(99), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='CITIZENSHIP_STATUS', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(2), + condition_function=category3.isEqual(2), result_field_name='CITIZENSHIP_STATUS', - result_function=validators.oneOf((1, 2, 3, 9)), + result_function=category3.isOneOf((1, 2, 3, 9)), ), ], fields=[ @@ -120,8 +121,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ] ), Field( @@ -132,7 +133,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()] + 
validators=[category2.isNotEmpty()] ), Field( item="60", @@ -142,7 +143,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.oneOf([1, 2, 4])] + validators=[category2.isOneOf([1, 2, 4])] ), Field( item="61", @@ -152,10 +153,10 @@ startIndex=20, endIndex=28, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ] ), TransformField( @@ -168,7 +169,7 @@ endIndex=37, required=True, is_encrypted=False, - validators=[validators.isNumber()] + validators=[category2.isNumber()] ), Field( item="63A", @@ -178,7 +179,7 @@ startIndex=37, endIndex=38, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63B", @@ -188,7 +189,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63C", @@ -198,7 +199,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63D", @@ -208,7 +209,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63E", @@ -218,7 +219,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63F", @@ -228,7 +229,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="64", @@ -238,7 +239,7 @@ startIndex=43, endIndex=44, required=True, - validators=[validators.isInLimits(0, 9)] + 
validators=[category2.isBetween(0, 9, inclusive=True)] ), Field( item="65A", @@ -248,7 +249,7 @@ startIndex=44, endIndex=45, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="65B", @@ -258,7 +259,7 @@ startIndex=45, endIndex=46, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="66", @@ -268,7 +269,7 @@ startIndex=46, endIndex=48, required=False, - validators=[validators.isInStringRange(0, 10)] + validators=[category2.isBetween(0, 10, inclusive=True, cast=int)] ), Field( item="67", @@ -278,7 +279,7 @@ startIndex=48, endIndex=49, required=False, - validators=[validators.oneOf([0, 2, 3])] + validators=[category2.isOneOf([0, 2, 3])] ), Field( item="68", @@ -289,10 +290,10 @@ endIndex=51, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99) - ), + category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int) + ]), ] ), Field( @@ -303,7 +304,7 @@ startIndex=51, endIndex=52, required=False, - validators=[validators.oneOf([1, 2, 3, 9])] + validators=[category2.isOneOf([1, 2, 3, 9])] ), Field( item="70A", @@ -313,7 +314,7 @@ startIndex=52, endIndex=56, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="70B", @@ -323,7 +324,7 @@ startIndex=56, endIndex=60, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ) ] ) @@ -334,87 +335,87 @@ generate_hashes_func=generate_t2_t3_t5_hashes, should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {2, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, - quiet_preparser_errors=validators.is_quiet_preparser_errors(min_length=61), + 
quiet_preparser_errors=is_quiet_preparser_errors(min_length=61), preparsing_validators=[ - validators.t3_m3_child_validator(SECOND_CHILD), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.t3_m3_child_validator(SECOND_CHILD), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='SSN', - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_HISPANIC', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_AMER_INDIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_ASIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - 
condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_BLACK', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_HAWAIIAN', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RACE_WHITE', - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='RELATIONSHIP_HOH', - result_function=validators.isInStringRange(4, 9), + result_function=category3.isBetween(4, 9, inclusive=True, cast=int), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name='PARENT_MINOR_CHILD', - result_function=validators.oneOf((1, 2, 3)), + result_function=category3.isOneOf((1, 2, 3)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='EDUCATION_LEVEL', - result_function=validators.notMatches(99), + result_function=category3.isNotEqual(99), ), - validators.if_then_validator( + category3.ifThenAlso( 
condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name='CITIZENSHIP_STATUS', - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name='FAMILY_AFFILIATION', - condition_function=validators.matches(2), + condition_function=category3.isEqual(2), result_field_name='CITIZENSHIP_STATUS', - result_function=validators.oneOf((1, 2, 3, 9)), + result_function=category3.isOneOf((1, 2, 3, 9)), ), ], fields=[ @@ -437,8 +438,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ] ), Field( @@ -449,7 +450,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()] + validators=[category2.isNotEmpty()] ), Field( item="60", @@ -459,7 +460,7 @@ startIndex=60, endIndex=61, required=True, - validators=[validators.oneOf([1, 2, 4])] + validators=[category2.isOneOf([1, 2, 4])] ), Field( item="61", @@ -469,10 +470,10 @@ startIndex=61, endIndex=69, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ] ), TransformField( @@ -485,7 +486,7 @@ endIndex=78, required=True, is_encrypted=False, - validators=[validators.isNumber()] + validators=[category2.isNumber()] ), Field( item="63A", @@ -495,7 +496,7 @@ startIndex=78, endIndex=79, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63B", @@ -505,7 +506,7 @@ startIndex=79, endIndex=80, required=False, - validators=[validators.isInLimits(0, 2)] + 
validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63C", @@ -515,7 +516,7 @@ startIndex=80, endIndex=81, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63D", @@ -525,7 +526,7 @@ startIndex=81, endIndex=82, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63E", @@ -535,7 +536,7 @@ startIndex=82, endIndex=83, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="63F", @@ -545,7 +546,7 @@ startIndex=83, endIndex=84, required=False, - validators=[validators.isInLimits(0, 2)] + validators=[category2.isBetween(0, 2, inclusive=True)] ), Field( item="64", @@ -555,7 +556,7 @@ startIndex=84, endIndex=85, required=True, - validators=[validators.isInLimits(0, 9)] + validators=[category2.isBetween(0, 9, inclusive=True)] ), Field( item="65A", @@ -565,7 +566,7 @@ startIndex=85, endIndex=86, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="65B", @@ -575,7 +576,7 @@ startIndex=86, endIndex=87, required=True, - validators=[validators.oneOf([1, 2])] + validators=[category2.isOneOf([1, 2])] ), Field( item="66", @@ -585,7 +586,7 @@ startIndex=87, endIndex=89, required=False, - validators=[validators.isInLimits(0, 10)] + validators=[category2.isBetween(0, 10, inclusive=True)] ), Field( item="67", @@ -595,7 +596,7 @@ startIndex=89, endIndex=90, required=False, - validators=[validators.oneOf([0, 2, 3])] + validators=[category2.isOneOf([0, 2, 3])] ), Field( item="68", @@ -606,10 +607,10 @@ endIndex=92, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99) - ) + category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, 
cast=int) + ]) ] ), Field( @@ -620,7 +621,7 @@ startIndex=92, endIndex=93, required=False, - validators=[validators.oneOf([1, 2, 3, 9])] + validators=[category2.isOneOf([1, 2, 3, 9])] ), Field( item="70A", @@ -630,7 +631,7 @@ startIndex=93, endIndex=97, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ), Field( item="70B", @@ -640,7 +641,7 @@ startIndex=97, endIndex=101, required=True, - validators=[validators.isInLimits(0, 9999)] + validators=[category2.isBetween(0, 9999, inclusive=True)] ) ] ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m4.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m4.py index f35c26483..931e447eb 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m4.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m4.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.ssp import SSP_M4DataSubmissionDocument from tdpservice.parsers.util import generate_t1_t4_hashes, get_t1_t4_partial_hash_members @@ -15,11 +15,11 @@ generate_hashes_func=generate_t1_t4_hashes, get_partial_hash_members_func=get_t1_t4_partial_hash_members, preparsing_validators=[ - validators.recordHasLengthBetween(34, 66), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLengthBetween(34, 66), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[], @@ -43,8 +43,8 @@ endIndex=8, required=True, 
validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -55,7 +55,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), TransformField( zero_pad(3), @@ -66,7 +66,7 @@ startIndex=19, endIndex=22, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], ), Field( item="4", @@ -76,7 +76,7 @@ startIndex=22, endIndex=24, required=False, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item="6", @@ -86,7 +86,7 @@ startIndex=24, endIndex=29, required=True, - validators=[validators.isInStringRange(0, 99999)], + validators=[category2.isBetween(0, 99999, inclusive=True, cast=int)], ), Field( item="7", @@ -96,7 +96,7 @@ startIndex=29, endIndex=30, required=True, - validators=[validators.matches(1)], + validators=[category2.isEqual(1)], ), Field( item="8", @@ -107,10 +107,10 @@ endIndex=32, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(1, 19), - validators.matches("99") - ) + category3.orValidators([ + category3.isBetween(1, 19, inclusive=True, cast=int), + category3.isEqual("99") + ]) ], ), Field( @@ -121,7 +121,7 @@ startIndex=32, endIndex=33, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="10`", @@ -131,7 +131,7 @@ startIndex=33, endIndex=34, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="11", @@ -141,7 +141,7 @@ startIndex=34, endIndex=35, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="12", @@ -151,7 +151,7 @@ startIndex=35, endIndex=36, required=True, - 
validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="-1", diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m5.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m5.py index 14961f751..60ea5bef7 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m5.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m5.py @@ -4,7 +4,7 @@ from tdpservice.parsers.transforms import ssp_ssn_decryption_func from tdpservice.parsers.fields import TransformField, Field from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.ssp import SSP_M5DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,95 +18,95 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {3, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.recordHasLength(66), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLength(66), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.validate__FAM_AFF__SSN(), - validators.if_then_validator( + category3.validate__FAM_AFF__SSN(), + category3.ifThenAlso( 
condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_WHITE", - 
result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="MARITAL_STATUS", - result_function=validators.isInLimits(1, 5), + result_function=category3.isBetween(1, 5, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 2), + condition_function=category3.isBetween(1, 2, inclusive=True), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EDUCATION_LEVEL", - result_function=validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99), - ), + result_function=category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="DATE_OF_BIRTH", - condition_function=validators.olderThan(18), + condition_function=category3.isOlderThan(18), result_field_name="REC_OASDI_INSURANCE", - result_function=validators.isInLimits(1, 
2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="REC_FEDERAL_DISABILITY", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), ], fields=[ @@ -129,8 +129,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -141,7 +141,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="13", @@ -151,7 +151,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.isInLimits(1, 5)], + validators=[category2.isBetween(1, 5, inclusive=True)], ), Field( item="14", @@ -161,10 +161,10 @@ startIndex=20, endIndex=28, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ], ), TransformField( @@ -176,7 +176,7 @@ startIndex=28, endIndex=37, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -187,7 +187,7 @@ startIndex=37, endIndex=38, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="16B", @@ -197,7 +197,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="16C", @@ -207,7 +207,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.validateRace()], + 
validators=[category2.validateRace()], ), Field( item="16D", @@ -217,7 +217,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="16E", @@ -227,7 +227,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="16F", @@ -237,7 +237,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17", @@ -247,7 +247,7 @@ startIndex=43, endIndex=44, required=True, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="18A", @@ -257,7 +257,7 @@ startIndex=44, endIndex=45, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="18B", @@ -267,7 +267,7 @@ startIndex=45, endIndex=46, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="18C", @@ -277,7 +277,7 @@ startIndex=46, endIndex=47, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="18D", @@ -287,7 +287,7 @@ startIndex=47, endIndex=48, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="18E", @@ -297,7 +297,7 @@ startIndex=48, endIndex=49, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="19", @@ -307,7 +307,7 @@ startIndex=49, endIndex=50, required=False, - validators=[validators.isInLimits(0, 5)], + validators=[category2.isBetween(0, 5, inclusive=True)], ), Field( item="20", @@ -317,7 +317,7 @@ startIndex=50, endIndex=52, required=True, - validators=[validators.isInStringRange(1, 10)], + 
validators=[category2.isBetween(1, 10, inclusive=True, cast=int)], ), Field( item="21", @@ -327,7 +327,7 @@ startIndex=52, endIndex=53, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="22", @@ -337,7 +337,7 @@ startIndex=53, endIndex=54, required=False, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="23", @@ -348,11 +348,11 @@ endIndex=56, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ), - validators.notMatches("00") + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]), + category2.isNotEqual("00") ], ), Field( @@ -364,7 +364,7 @@ endIndex=57, required=False, validators=[ - validators.oneOf([1, 2, 3, 9]), + category2.isOneOf([1, 2, 3, 9]), ], ), Field( @@ -375,7 +375,7 @@ startIndex=57, endIndex=58, required=False, - validators=[validators.isInLimits(0, 3)], + validators=[category2.isBetween(0, 3, inclusive=True)], ), Field( item="26", @@ -385,7 +385,7 @@ startIndex=58, endIndex=62, required=False, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="27", @@ -395,7 +395,7 @@ startIndex=62, endIndex=66, required=False, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m6.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m6.py index 43d9ec7f5..670969fae 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m6.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m6.py @@ -4,26 +4,26 @@ from tdpservice.parsers.transforms import calendar_quarter_to_rpt_month_year from tdpservice.parsers.fields import 
Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.ssp import SSP_M6DataSubmissionDocument s1 = RowSchema( record_type="M6", document=SSP_M6DataSubmissionDocument(), preparsing_validators=[ - validators.recordHasLength(259), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(259), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual( + category3.sumIsEqual( "SSPMOE_FAMILIES", [ "NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", [ "ADULT_RECIPIENTS", "CHILD_RECIPIENTS" @@ -50,8 +50,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid() + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid() ] ), TransformField( @@ -64,8 +64,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid() + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid() ] ), Field( @@ -76,7 +76,7 @@ startIndex=7, endIndex=15, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="4A", @@ -86,7 +86,7 @@ startIndex=31, endIndex=39, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="5A", @@ -96,7 +96,7 @@ startIndex=55, endIndex=63, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="6A", @@ -106,7 +106,7 @@ startIndex=79, endIndex=87, 
required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="7A", @@ -116,7 +116,7 @@ startIndex=103, endIndex=111, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="8A", @@ -126,7 +126,7 @@ startIndex=127, endIndex=135, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="9A", @@ -136,7 +136,7 @@ startIndex=151, endIndex=159, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="10A", @@ -146,7 +146,7 @@ startIndex=175, endIndex=183, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="11A", @@ -156,7 +156,7 @@ startIndex=199, endIndex=211, required=True, - validators=[validators.isInLimits(0, 999999999999)] + validators=[category2.isBetween(0, 999999999999, inclusive=True)] ), Field( item="12A", @@ -166,7 +166,7 @@ startIndex=235, endIndex=243, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), ], ) @@ -176,19 +176,19 @@ document=SSP_M6DataSubmissionDocument(), quiet_preparser_errors=True, preparsing_validators=[ - validators.recordHasLength(259), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(259), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual( + category3.sumIsEqual( "SSPMOE_FAMILIES", [ "NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", [ "ADULT_RECIPIENTS", "CHILD_RECIPIENTS" 
@@ -215,8 +215,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid() + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid() ] ), TransformField( @@ -229,8 +229,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid() + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid() ] ), Field( @@ -241,7 +241,7 @@ startIndex=15, endIndex=23, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="4B", @@ -251,7 +251,7 @@ startIndex=39, endIndex=47, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="5B", @@ -261,7 +261,7 @@ startIndex=63, endIndex=71, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="6B", @@ -271,7 +271,7 @@ startIndex=87, endIndex=95, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="7B", @@ -281,7 +281,7 @@ startIndex=111, endIndex=119, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="8B", @@ -291,7 +291,7 @@ startIndex=135, endIndex=143, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="9B", @@ -301,7 +301,7 @@ startIndex=159, endIndex=167, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="10B", @@ -311,7 +311,7 @@ startIndex=183, endIndex=191, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, 
inclusive=True)] ), Field( item="11B", @@ -321,7 +321,7 @@ startIndex=211, endIndex=223, required=True, - validators=[validators.isInLimits(0, 999999999999)] + validators=[category2.isBetween(0, 999999999999, inclusive=True)] ), Field( item="12B", @@ -331,7 +331,7 @@ startIndex=243, endIndex=251, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), ], ) @@ -341,19 +341,19 @@ document=SSP_M6DataSubmissionDocument(), quiet_preparser_errors=True, preparsing_validators=[ - validators.recordHasLength(259), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(259), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual( + category3.sumIsEqual( "SSPMOE_FAMILIES", [ "NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", [ "ADULT_RECIPIENTS", "CHILD_RECIPIENTS" @@ -380,8 +380,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid() + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid() ] ), TransformField( @@ -394,8 +394,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid() + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid() ] ), Field( @@ -406,7 +406,7 @@ startIndex=23, endIndex=31, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="4C", @@ -416,7 +416,7 @@ startIndex=47, endIndex=55, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="5C", @@ -426,7 +426,7 @@ startIndex=71, endIndex=79, required=True, - 
validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="6C", @@ -436,7 +436,7 @@ startIndex=95, endIndex=103, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="7C", @@ -446,7 +446,7 @@ startIndex=119, endIndex=127, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="8C", @@ -456,7 +456,7 @@ startIndex=143, endIndex=151, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="9C", @@ -466,7 +466,7 @@ startIndex=167, endIndex=175, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="10C", @@ -476,7 +476,7 @@ startIndex=191, endIndex=199, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), Field( item="11C", @@ -486,7 +486,7 @@ startIndex=223, endIndex=235, required=True, - validators=[validators.isInLimits(0, 999999999999)] + validators=[category2.isBetween(0, 999999999999, inclusive=True)] ), Field( item="12C", @@ -496,7 +496,7 @@ startIndex=251, endIndex=259, required=True, - validators=[validators.isInLimits(0, 99999999)] + validators=[category2.isBetween(0, 99999999, inclusive=True)] ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m7.py b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m7.py index 5075b12b9..cefe17095 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m7.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/ssp/m7.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import calendar_quarter_to_rpt_month_year from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema 
import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2 from tdpservice.search_indexes.documents.ssp import SSP_M7DataSubmissionDocument schemas = [] @@ -23,11 +23,11 @@ document=SSP_M7DataSubmissionDocument(), quiet_preparser_errors=i > 1, preparsing_validators=[ - validators.recordHasLength(247), - validators.notEmpty(0, 7), - validators.notEmpty(validator_index, validator_index + 24), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(247), + category1.recordIsNotEmpty(0, 7), + category1.recordIsNotEmpty(validator_index, validator_index + 24), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[], fields=[ @@ -50,8 +50,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid(), + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), ], ), TransformField( @@ -64,8 +64,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -76,7 +76,7 @@ startIndex=section_ind_index, endIndex=section_ind_index + 1, required=True, - validators=[validators.oneOf(["1", "2"])], + validators=[category2.isOneOf(["1", "2"])], ), Field( item="4", @@ -86,7 +86,7 @@ startIndex=stratum_index, endIndex=stratum_index + 2, required=True, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item=families_item_numbers[i - 1], @@ -96,7 +96,7 @@ startIndex=families_index, endIndex=families_index + 7, required=True, - validators=[validators.isInLimits(0, 9999999)], + validators=[category2.isBetween(0, 9999999, inclusive=True)], ), ], ) diff --git 
a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t1.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t1.py index fc3d7c34b..8f9aba575 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t1.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t1.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tanf import TANF_T1DataSubmissionDocument from tdpservice.parsers.util import generate_t1_t4_hashes, get_t1_t4_partial_hash_members @@ -16,105 +16,105 @@ generate_hashes_func=generate_t1_t4_hashes, get_partial_hash_members_func=get_t1_t4_partial_hash_members, preparsing_validators=[ - validators.recordHasLengthBetween(117, 156), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLengthBetween(117, 156), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="CASH_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0) ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="CC_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="CHILDREN_COVERED", - result_function=validators.isLargerThan(0), + 
result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="CC_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="CC_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="TRANSP_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="TRANSP_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="TRANSITION_SERVICES_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="TRANSITION_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="OTHER_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="WORK_REQ_SANCTION", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="FAMILY_SANC_ADULT", - result_function=validators.oneOf((1, 2)), + 
result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="SANC_TEEN_PARENT", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="NON_COOPERATION_CSE", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="FAILURE_TO_COMPLY", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="OTHER_SANCTION", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_TOTAL_REDUCTIONS", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="FAMILY_CAP", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_TOTAL_REDUCTIONS", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="REDUCTIONS_ON_RECEIPTS", - result_function=validators.oneOf((1, 2)), + 
result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_TOTAL_REDUCTIONS", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="OTHER_NON_SANCTION", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.sumIsLarger( + category3.sumIsLarger( ( "AMT_FOOD_STAMP_ASSISTANCE", "AMT_SUB_CC", @@ -145,8 +145,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -157,7 +157,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), TransformField( zero_pad(3), @@ -168,7 +168,7 @@ startIndex=19, endIndex=22, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], ), Field( item="5", @@ -179,7 +179,7 @@ endIndex=24, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -191,7 +191,7 @@ endIndex=29, required=True, validators=[ - validators.isNumber(), + category2.isNumber(), ], ), Field( @@ -203,7 +203,7 @@ endIndex=30, required=True, validators=[ - validators.isInLimits(1, 2), + category2.isBetween(1, 2, inclusive=True), ], ), Field( @@ -215,7 +215,7 @@ endIndex=31, required=True, validators=[ - validators.matches(1), + category2.isEqual(1), ], ), Field( @@ -227,7 +227,7 @@ endIndex=32, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -239,7 +239,7 @@ endIndex=34, required=True, validators=[ - validators.isLargerThan(0), + category2.isGreaterThan(0), ], ), Field( @@ -251,7 +251,7 @@ endIndex=35, required=True, validators=[ - validators.isInLimits(1, 3), + category2.isBetween(1, 3, inclusive=True), ], ), 
Field( @@ -263,7 +263,7 @@ endIndex=36, required=True, validators=[ - validators.isInLimits(1, 2), + category2.isBetween(1, 2, inclusive=True), ], ), Field( @@ -275,7 +275,7 @@ endIndex=37, required=True, validators=[ - validators.isInLimits(1, 2), + category2.isBetween(1, 2, inclusive=True), ], ), Field( @@ -287,7 +287,7 @@ endIndex=38, required=False, validators=[ - validators.isInLimits(0, 2), + category2.isBetween(0, 2, inclusive=True), ], ), Field( @@ -299,7 +299,7 @@ endIndex=42, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -311,7 +311,7 @@ endIndex=43, required=False, validators=[ - validators.isInLimits(0, 3), + category2.isBetween(0, 3, inclusive=True), ], ), Field( @@ -323,7 +323,7 @@ endIndex=47, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -335,7 +335,7 @@ endIndex=51, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -347,7 +347,7 @@ endIndex=55, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -359,7 +359,7 @@ endIndex=59, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -371,7 +371,7 @@ endIndex=62, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -383,7 +383,7 @@ endIndex=66, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -395,7 +395,7 @@ endIndex=68, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -407,7 +407,7 @@ endIndex=71, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + 
category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -419,7 +419,7 @@ endIndex=75, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -431,7 +431,7 @@ endIndex=78, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -443,7 +443,7 @@ endIndex=82, required=False, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -455,7 +455,7 @@ endIndex=85, required=False, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -467,7 +467,7 @@ endIndex=89, required=False, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -479,7 +479,7 @@ endIndex=92, required=False, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -491,7 +491,7 @@ endIndex=96, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -503,7 +503,7 @@ endIndex=97, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -515,7 +515,7 @@ endIndex=98, required=False, validators=[ - validators.oneOf([0, 1, 2]), + category2.isOneOf([0, 1, 2]), ], ), Field( @@ -527,7 +527,7 @@ endIndex=99, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -539,7 +539,7 @@ endIndex=100, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -551,7 +551,7 @@ endIndex=101, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -563,7 +563,7 @@ endIndex=102, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -575,7 +575,7 @@ 
endIndex=106, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -587,7 +587,7 @@ endIndex=110, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -599,7 +599,7 @@ endIndex=111, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -611,7 +611,7 @@ endIndex=112, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -623,7 +623,7 @@ endIndex=113, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -635,8 +635,8 @@ endIndex=114, required=False, validators=[ - validators.oneOf(["9", " "]), - validators.isAlphaNumeric(), + category2.isOneOf(["9", " "]), + category2.isAlphaNumeric(), ], ), Field( @@ -647,7 +647,7 @@ startIndex=114, endIndex=116, required=True, - validators=[validators.oneOf([1, 2, 3, 4, 6, 7, 8, 9])], + validators=[category2.isOneOf([1, 2, 3, 4, 6, 7, 8, 9])], ), Field( item="29", @@ -658,7 +658,7 @@ endIndex=117, required=False, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t2.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t2.py index 78d83709e..98ebebd06 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t2.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t2.py @@ -4,7 +4,7 @@ from tdpservice.parsers.transforms import tanf_ssn_decryption_func from tdpservice.parsers.fields import TransformField, Field from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tanf import TANF_T2DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, 
get_t2_t3_t5_partial_hash_members @@ -18,119 +18,120 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {3, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.recordHasLength(156), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLength(156), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.validate__FAM_AFF__SSN(), - validators.if_then_validator( + category3.validate__FAM_AFF__SSN(), + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), 
result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="MARITAL_STATUS", - result_function=validators.isInLimits(1, 5), + result_function=category3.isBetween(1, 5, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 2), + condition_function=category3.isBetween(1, 2, inclusive=True), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( 
condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EDUCATION_LEVEL", - result_function=validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ), + result_function=category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="COOPERATION_CHILD_SUPPORT", - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EMPLOYMENT_STATUS", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="WORK_ELIGIBLE_INDICATOR", - result_function=validators.or_validators( - validators.isInStringRange(1, 9), validators.oneOf(("11", "12")) - ), + result_function=category3.orValidators([ + category3.isBetween(1, 9, inclusive=True, cast=int), + 
category3.isOneOf(("11", "12")) + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="WORK_PART_STATUS", - result_function=validators.oneOf( + result_function=category3.isOneOf( ["01", "02", "05", "07", "09", "15", "17", "18", "19", "99"] ), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="WORK_ELIGIBLE_INDICATOR", - condition_function=validators.isInStringRange(1, 5), + condition_function=category3.isBetween(1, 5, inclusive=True, cast=int), result_field_name="WORK_PART_STATUS", - result_function=validators.notMatches("99"), + result_function=category3.isNotEqual("99"), ), - validators.validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE(), + category3.validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE(), ], fields=[ Field( @@ -152,8 +153,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -164,7 +165,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="30", @@ -174,7 +175,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.oneOf([1, 2, 3, 5])], + validators=[category2.isOneOf([1, 2, 3, 5])], ), Field( item="31", @@ -184,7 +185,7 @@ startIndex=20, endIndex=21, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="32", @@ -194,10 +195,10 @@ startIndex=21, endIndex=29, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + 
category2.dateDayIsValid() ] ), TransformField( @@ -209,7 +210,7 @@ startIndex=29, endIndex=38, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -220,7 +221,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34B", @@ -230,7 +231,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34C", @@ -240,7 +241,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34D", @@ -250,7 +251,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34E", @@ -260,7 +261,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34F", @@ -270,7 +271,7 @@ startIndex=43, endIndex=44, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="35", @@ -281,7 +282,7 @@ endIndex=45, required=True, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -292,7 +293,7 @@ startIndex=45, endIndex=46, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="36B", @@ -302,7 +303,7 @@ startIndex=46, endIndex=47, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="36C", @@ -313,10 +314,10 @@ endIndex=48, required=True, validators=[ - validators.or_validators( - validators.oneOf(["1", "2"]), - validators.isBlank() - ) + 
category3.orValidators([ + category3.isOneOf(["1", "2"]), + category3.isBlank() + ]) ], ), Field( @@ -328,7 +329,7 @@ endIndex=49, required=False, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -340,7 +341,7 @@ endIndex=50, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -352,7 +353,7 @@ endIndex=51, required=False, validators=[ - validators.isInLimits(0, 5), + category2.isBetween(0, 5, inclusive=True), ], ), Field( @@ -364,7 +365,7 @@ endIndex=53, required=True, validators=[ - validators.isInStringRange(1, 10), + category2.isBetween(1, 10, inclusive=True, cast=int), ], ), Field( @@ -376,7 +377,7 @@ endIndex=54, required=False, validators=[ - validators.isInLimits(0, 3), + category2.isBetween(0, 3, inclusive=True), ], ), Field( @@ -388,7 +389,7 @@ endIndex=55, required=False, validators=[ - validators.isInLimits(0, 9), + category2.isBetween(0, 9, inclusive=True), ], ), Field( @@ -400,10 +401,10 @@ endIndex=57, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]) ], ), Field( @@ -414,7 +415,7 @@ startIndex=57, endIndex=58, required=False, - validators=[validators.oneOf([0, 1, 2, 9])], + validators=[category2.isOneOf([0, 1, 2, 9])], ), Field( item="43", @@ -425,7 +426,7 @@ endIndex=59, required=False, validators=[ - validators.oneOf([0, 1, 2, 9]), + category2.isOneOf([0, 1, 2, 9]), ], ), Field( @@ -437,7 +438,7 @@ endIndex=62, required=False, validators=[ - validators.isInStringRange(0, 999), + category2.isBetween(0, 999, inclusive=True, cast=int), ], ), Field( @@ -449,7 +450,7 @@ endIndex=64, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( 
@@ -461,7 +462,7 @@ endIndex=65, required=False, validators=[ - validators.isInLimits(0, 9), + category2.isBetween(0, 9, inclusive=True), ], ), Field( @@ -473,7 +474,7 @@ endIndex=66, required=False, validators=[ - validators.isInLimits(0, 3), + category2.isBetween(0, 3, inclusive=True), ], ), Field( @@ -485,10 +486,10 @@ endIndex=68, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(0, 9), - validators.oneOf(("11", "12")), - ) + category3.orValidators([ + category3.isBetween(0, 9, inclusive=True, cast=int), + category3.isOneOf(("11", "12")), + ]) ], ), Field( @@ -500,7 +501,7 @@ endIndex=70, required=True, validators=[ - validators.oneOf( + category2.isOneOf( [ "01", "02", @@ -525,7 +526,7 @@ endIndex=72, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -537,7 +538,7 @@ endIndex=74, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -549,7 +550,7 @@ endIndex=76, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -561,7 +562,7 @@ endIndex=78, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -573,7 +574,7 @@ endIndex=80, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -585,7 +586,7 @@ endIndex=82, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -597,7 +598,7 @@ endIndex=84, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -609,7 +610,7 @@ endIndex=86, required=False, validators=[ - validators.isInStringRange(0, 99), + 
category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -621,7 +622,7 @@ endIndex=88, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -633,7 +634,7 @@ endIndex=90, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -645,7 +646,7 @@ endIndex=92, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -657,7 +658,7 @@ endIndex=94, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -669,7 +670,7 @@ endIndex=96, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -681,7 +682,7 @@ endIndex=98, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -693,7 +694,7 @@ endIndex=100, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -705,7 +706,7 @@ endIndex=102, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -717,7 +718,7 @@ endIndex=104, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -729,7 +730,7 @@ endIndex=106, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -741,7 +742,7 @@ endIndex=108, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -754,7 +755,7 @@ endIndex=110, required=False, validators=[ - 
validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -767,7 +768,7 @@ endIndex=112, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -780,7 +781,7 @@ endIndex=114, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -793,7 +794,7 @@ endIndex=116, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -806,7 +807,7 @@ endIndex=118, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -819,7 +820,7 @@ endIndex=120, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -832,7 +833,7 @@ endIndex=122, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -845,7 +846,7 @@ endIndex=124, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -858,7 +859,7 @@ endIndex=126, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -870,7 +871,7 @@ endIndex=128, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -882,7 +883,7 @@ endIndex=130, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -894,7 +895,7 @@ endIndex=132, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -906,7 +907,7 @@ endIndex=136, 
required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -918,7 +919,7 @@ endIndex=140, required=False, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -930,7 +931,7 @@ endIndex=144, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -942,7 +943,7 @@ endIndex=148, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -954,7 +955,7 @@ endIndex=152, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -966,7 +967,7 @@ endIndex=156, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), ], diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t3.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t3.py index 901271a89..531e92af7 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t3.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t3.py @@ -4,7 +4,8 @@ from tdpservice.parsers.transforms import tanf_ssn_decryption_func from tdpservice.parsers.fields import TransformField, Field from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 +from tdpservice.parsers.validators.util import is_quiet_preparser_errors from tdpservice.search_indexes.documents.tanf import TANF_T3DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,85 +19,85 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {2, 4, 5}, 
get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.t3_m3_child_validator(FIRST_CHILD), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.t3_m3_child_validator(FIRST_CHILD), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - 
condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RELATIONSHIP_HOH", - result_function=validators.isInStringRange(4, 9), + result_function=category3.isBetween(4, 9, inclusive=True, cast=int), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.oneOf((2, 3)), + result_function=category3.isOneOf((2, 3)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="EDUCATION_LEVEL", - result_function=validators.notMatches("99"), + result_function=category3.isNotEqual("99"), ), - validators.if_then_validator( + category3.ifThenAlso( 
condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(2), + condition_function=category3.isEqual(2), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), ], fields=[ @@ -128,7 +129,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="67", @@ -138,7 +139,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.oneOf([1, 2, 4])], + validators=[category2.isOneOf([1, 2, 4])], ), Field( item="68", @@ -148,10 +149,10 @@ startIndex=20, endIndex=28, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ], ), TransformField( @@ -163,7 +164,7 @@ startIndex=28, endIndex=37, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -174,7 +175,7 @@ startIndex=37, endIndex=38, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70B", @@ -184,7 +185,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70C", @@ -194,7 +195,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70D", @@ -204,7 +205,7 
@@ startIndex=40, endIndex=41, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70E", @@ -214,7 +215,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70F", @@ -224,7 +225,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="71", @@ -234,7 +235,7 @@ startIndex=43, endIndex=44, required=True, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="72A", @@ -244,7 +245,7 @@ startIndex=44, endIndex=45, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="72B", @@ -254,7 +255,7 @@ startIndex=45, endIndex=46, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="73", @@ -264,7 +265,7 @@ startIndex=46, endIndex=48, required=False, - validators=[validators.isInStringRange(0, 10)], + validators=[category2.isBetween(0, 10, inclusive=True, cast=int)], ), Field( item="74", @@ -274,7 +275,7 @@ startIndex=48, endIndex=49, required=False, - validators=[validators.oneOf([0, 2, 3])], + validators=[category2.isOneOf([0, 2, 3])], ), Field( item="75", @@ -285,10 +286,10 @@ endIndex=51, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]) ], ), Field( @@ -299,7 +300,7 @@ startIndex=51, endIndex=52, required=False, - validators=[validators.oneOf([1, 2, 9])], + validators=[category2.isOneOf([1, 2, 9])], ), Field( item="77A", @@ -309,7 +310,7 @@ startIndex=52, endIndex=56, required=False, - validators=[validators.isInStringRange(0, 
9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="77B", @@ -319,7 +320,7 @@ startIndex=56, endIndex=60, required=False, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) @@ -331,88 +332,88 @@ generate_hashes_func=generate_t2_t3_t5_hashes, should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {2, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, - quiet_preparser_errors=validators.is_quiet_preparser_errors(min_length=61), + quiet_preparser_errors=is_quiet_preparser_errors(min_length=61), preparsing_validators=[ - validators.t3_m3_child_validator(SECOND_CHILD), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.t3_m3_child_validator(SECOND_CHILD), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], # all conditions from first child should be met, otherwise we don't parse second child postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - 
condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RELATIONSHIP_HOH", - result_function=validators.isInStringRange(4, 9), + result_function=category3.isBetween(4, 9, inclusive=True, cast=int), ), - validators.if_then_validator( + 
category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.oneOf((2, 3)), + result_function=category3.isOneOf((2, 3)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="EDUCATION_LEVEL", - result_function=validators.notMatches("99"), + result_function=category3.isNotEqual("99"), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(2), + condition_function=category3.isEqual(2), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), ], fields=[ @@ -444,7 +445,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="67", @@ -454,7 +455,7 @@ startIndex=60, endIndex=61, required=True, - validators=[validators.oneOf([1, 2, 4])], + validators=[category2.isOneOf([1, 2, 4])], ), Field( item="68", @@ -464,10 +465,10 @@ startIndex=61, endIndex=69, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ] ), TransformField( @@ -479,7 +480,7 @@ 
startIndex=69, endIndex=78, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -490,7 +491,7 @@ startIndex=78, endIndex=79, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70B", @@ -500,7 +501,7 @@ startIndex=79, endIndex=80, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70C", @@ -510,7 +511,7 @@ startIndex=80, endIndex=81, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70D", @@ -520,7 +521,7 @@ startIndex=81, endIndex=82, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70E", @@ -530,7 +531,7 @@ startIndex=82, endIndex=83, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70F", @@ -540,7 +541,7 @@ startIndex=83, endIndex=84, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="71", @@ -550,7 +551,7 @@ startIndex=84, endIndex=85, required=True, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="72A", @@ -560,7 +561,7 @@ startIndex=85, endIndex=86, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="72B", @@ -570,7 +571,7 @@ startIndex=86, endIndex=87, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="73", @@ -580,7 +581,7 @@ startIndex=87, endIndex=89, required=False, - validators=[validators.isInStringRange(0, 10)], + validators=[category2.isBetween(0, 10, inclusive=True, cast=int)], ), Field( item="74", @@ -590,7 +591,7 @@ startIndex=89, endIndex=90, required=False, - validators=[validators.oneOf([0, 2, 3])], + 
validators=[category2.isOneOf([0, 2, 3])], ), Field( item="75", @@ -601,10 +602,10 @@ endIndex=92, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.oneOf(["98", "99"]) - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isOneOf(["98", "99"]) + ]) ], ), Field( @@ -615,7 +616,7 @@ startIndex=92, endIndex=93, required=False, - validators=[validators.oneOf([1, 2, 9])], + validators=[category2.isOneOf([1, 2, 9])], ), Field( item="77A", @@ -625,7 +626,7 @@ startIndex=93, endIndex=97, required=False, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="77B", @@ -635,7 +636,7 @@ startIndex=97, endIndex=101, required=False, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t4.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t4.py index bb8082e50..4426e0248 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t4.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t4.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tanf import TANF_T4DataSubmissionDocument from tdpservice.parsers.util import generate_t1_t4_hashes, get_t1_t4_partial_hash_members @@ -16,11 +16,11 @@ generate_hashes_func=generate_t1_t4_hashes, get_partial_hash_members_func=get_t1_t4_partial_hash_members, preparsing_validators=[ - validators.recordHasLengthBetween(36, 71), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - 
validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLengthBetween(36, 71), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[], @@ -44,8 +44,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -56,7 +56,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), TransformField( zero_pad(3), @@ -67,7 +67,7 @@ startIndex=19, endIndex=22, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], ), Field( item="5", @@ -77,7 +77,7 @@ startIndex=22, endIndex=24, required=False, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item="7", @@ -97,7 +97,7 @@ startIndex=29, endIndex=30, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="9", @@ -108,10 +108,10 @@ endIndex=32, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(1, 19), - validators.matches("99") - ) + category3.orValidators([ + category3.isBetween(1, 19, inclusive=True, cast=int), + category3.isEqual("99") + ]) ], ), Field( @@ -122,7 +122,7 @@ startIndex=32, endIndex=33, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="11", @@ -132,7 +132,7 @@ startIndex=33, endIndex=34, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="12", @@ -142,7 +142,7 @@ startIndex=34, endIndex=35, required=True, - 
validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="13", @@ -152,7 +152,7 @@ startIndex=35, endIndex=36, required=True, - validators=[validators.isInLimits(1, 3)], + validators=[category2.isBetween(1, 3, inclusive=True)], ), Field( item="-1", diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t5.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t5.py index 471a067cc..206d18e48 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t5.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t5.py @@ -4,7 +4,7 @@ from tdpservice.parsers.transforms import tanf_ssn_decryption_func from tdpservice.parsers.fields import TransformField, Field from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tanf import TANF_T5DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,95 +18,95 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {3, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.recordHasLength(71), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLength(71), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + 
result_function=category3.validateSSN(), ), - validators.validate__FAM_AFF__SSN(), - validators.if_then_validator( + category3.validate__FAM_AFF__SSN(), + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( 
condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="MARITAL_STATUS", - result_function=validators.isInLimits(1, 5), + result_function=category3.isBetween(1, 5, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 2), + condition_function=category3.isBetween(1, 2, inclusive=True), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EDUCATION_LEVEL", - result_function=validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99), - ), + result_function=category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( 
condition_field_name="DATE_OF_BIRTH", - condition_function=validators.olderThan(18), + condition_function=category3.isOlderThan(18), result_field_name="REC_OASDI_INSURANCE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="REC_FEDERAL_DISABILITY", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), ], fields=[ @@ -129,8 +129,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -141,7 +141,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="14", @@ -151,7 +151,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.isInLimits(1, 5)], + validators=[category2.isBetween(1, 5, inclusive=True)], ), Field( item="15", @@ -161,10 +161,10 @@ startIndex=20, endIndex=28, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ], ), TransformField( @@ -176,7 +176,7 @@ startIndex=28, endIndex=37, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -187,7 +187,7 @@ startIndex=37, endIndex=38, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17B", @@ -197,7 +197,7 @@ startIndex=38, endIndex=39, required=False, - 
validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17C", @@ -207,7 +207,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17D", @@ -217,7 +217,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17E", @@ -227,7 +227,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17F", @@ -237,7 +237,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="18", @@ -247,7 +247,7 @@ startIndex=43, endIndex=44, required=True, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="19A", @@ -257,7 +257,7 @@ startIndex=44, endIndex=45, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19B", @@ -267,7 +267,7 @@ startIndex=45, endIndex=46, required=False, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="19C", @@ -277,7 +277,7 @@ startIndex=46, endIndex=47, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19D", @@ -287,7 +287,7 @@ startIndex=47, endIndex=48, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19E", @@ -297,7 +297,7 @@ startIndex=48, endIndex=49, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="20", @@ -307,7 +307,7 @@ startIndex=49, endIndex=50, required=False, - 
validators=[validators.isInLimits(0, 5)], + validators=[category2.isBetween(0, 5, inclusive=True)], ), Field( item="21", @@ -317,7 +317,7 @@ startIndex=50, endIndex=52, required=True, - validators=[validators.isInStringRange(1, 10)], + validators=[category2.isBetween(1, 10, inclusive=True, cast=int)], ), Field( item="22", @@ -327,7 +327,7 @@ startIndex=52, endIndex=53, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="23", @@ -337,7 +337,7 @@ startIndex=53, endIndex=54, required=False, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="24", @@ -348,10 +348,10 @@ endIndex=56, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]) ], ), Field( @@ -363,10 +363,10 @@ endIndex=57, required=False, validators=[ - validators.or_validators( - validators.isInLimits(0, 2), - validators.matches(9) - ) + category3.orValidators([ + category3.isBetween(0, 2, inclusive=True), + category3.isEqual(9) + ]) ], ), Field( @@ -377,7 +377,7 @@ startIndex=57, endIndex=60, required=False, - validators=[validators.isInStringRange(0, 999)], + validators=[category2.isBetween(0, 999, inclusive=True, cast=int)], ), Field( item="27", @@ -387,7 +387,7 @@ startIndex=60, endIndex=62, required=False, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item="28", @@ -397,7 +397,7 @@ startIndex=62, endIndex=63, required=False, - validators=[validators.isInLimits(0, 3)], + validators=[category2.isBetween(0, 3, inclusive=True)], ), Field( item="29", @@ -407,7 +407,7 @@ startIndex=63, endIndex=67, required=False, - validators=[validators.isInStringRange(0, 
9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="30", @@ -417,7 +417,7 @@ startIndex=67, endIndex=71, required=False, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t6.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t6.py index 4b355c4ed..c60c676cf 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t6.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t6.py @@ -4,32 +4,32 @@ from tdpservice.parsers.transforms import calendar_quarter_to_rpt_month_year from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tanf import TANF_T6DataSubmissionDocument s1 = RowSchema( record_type="T6", document=TANF_T6DataSubmissionDocument(), preparsing_validators=[ - validators.recordHasLength(379), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(379), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual( + category3.sumIsEqual( "NUM_APPLICATIONS", [ "NUM_APPROVED", "NUM_DENIED" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_FAMILIES", [ "NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", [ "NUM_ADULT_RECIPIENTS", "NUM_CHILD_RECIPIENTS" @@ -56,8 +56,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid(), + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), ], ), TransformField( @@ -70,8 +70,8 @@ endIndex=7, 
required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -82,7 +82,7 @@ startIndex=7, endIndex=15, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="5A", @@ -92,7 +92,7 @@ startIndex=31, endIndex=39, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="6A", @@ -102,7 +102,7 @@ startIndex=55, endIndex=63, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="7A", @@ -112,7 +112,7 @@ startIndex=79, endIndex=91, required=True, - validators=[validators.isInLimits(0, 999999999999)], + validators=[category2.isBetween(0, 999999999999, inclusive=True)], ), Field( item="8A", @@ -122,7 +122,7 @@ startIndex=115, endIndex=123, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="9A", @@ -132,7 +132,7 @@ startIndex=139, endIndex=147, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="10A", @@ -142,7 +142,7 @@ startIndex=163, endIndex=171, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="11A", @@ -152,7 +152,7 @@ startIndex=187, endIndex=195, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="12A", @@ -162,7 +162,7 @@ startIndex=211, endIndex=219, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( 
item="13A", @@ -172,7 +172,7 @@ startIndex=235, endIndex=243, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="14A", @@ -182,7 +182,7 @@ startIndex=259, endIndex=267, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="15A", @@ -192,7 +192,7 @@ startIndex=283, endIndex=291, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="16A", @@ -202,7 +202,7 @@ startIndex=307, endIndex=315, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="17A", @@ -212,7 +212,7 @@ startIndex=331, endIndex=339, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="18A", @@ -222,7 +222,7 @@ startIndex=355, endIndex=363, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), ], ) @@ -232,25 +232,25 @@ document=TANF_T6DataSubmissionDocument(), quiet_preparser_errors=True, preparsing_validators=[ - validators.recordHasLength(379), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(379), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual( + category3.sumIsEqual( "NUM_APPLICATIONS", [ "NUM_APPROVED", "NUM_DENIED" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_FAMILIES", [ "NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", [ "NUM_ADULT_RECIPIENTS", "NUM_CHILD_RECIPIENTS" @@ -276,7 +276,10 @@ 
startIndex=2, endIndex=7, required=True, - validators=[], + validators=[ + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), + ], ), TransformField( calendar_quarter_to_rpt_month_year(1), @@ -297,7 +300,7 @@ startIndex=15, endIndex=23, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="5B", @@ -307,7 +310,7 @@ startIndex=39, endIndex=47, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="6B", @@ -317,7 +320,7 @@ startIndex=63, endIndex=71, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="7B", @@ -327,7 +330,7 @@ startIndex=91, endIndex=103, required=True, - validators=[validators.isInLimits(0, 999999999999)], + validators=[category2.isBetween(0, 999999999999, inclusive=True)], ), Field( item="8B", @@ -337,7 +340,7 @@ startIndex=123, endIndex=131, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="9B", @@ -347,7 +350,7 @@ startIndex=147, endIndex=155, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="10B", @@ -357,7 +360,7 @@ startIndex=171, endIndex=179, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="11B", @@ -367,7 +370,7 @@ startIndex=195, endIndex=203, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="12B", @@ -377,7 +380,7 @@ startIndex=219, endIndex=227, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, 
inclusive=True)], ), Field( item="13B", @@ -387,7 +390,7 @@ startIndex=243, endIndex=251, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="14B", @@ -397,7 +400,7 @@ startIndex=267, endIndex=275, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="15B", @@ -407,7 +410,7 @@ startIndex=291, endIndex=299, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="16B", @@ -417,7 +420,7 @@ startIndex=315, endIndex=323, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="17B", @@ -427,7 +430,7 @@ startIndex=339, endIndex=347, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="18B", @@ -437,7 +440,7 @@ startIndex=363, endIndex=371, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), ], ) @@ -447,25 +450,25 @@ document=TANF_T6DataSubmissionDocument(), quiet_preparser_errors=True, preparsing_validators=[ - validators.recordHasLength(379), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(379), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual( + category3.sumIsEqual( "NUM_APPLICATIONS", [ "NUM_APPROVED", "NUM_DENIED" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_FAMILIES", [ "NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS" ] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", [ "NUM_ADULT_RECIPIENTS", 
"NUM_CHILD_RECIPIENTS" @@ -491,7 +494,10 @@ startIndex=2, endIndex=7, required=True, - validators=[], + validators=[ + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), + ], ), TransformField( calendar_quarter_to_rpt_month_year(2), @@ -512,7 +518,7 @@ startIndex=23, endIndex=31, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="5C", @@ -522,7 +528,7 @@ startIndex=47, endIndex=55, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="6C", @@ -532,7 +538,7 @@ startIndex=71, endIndex=79, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="7C", @@ -542,7 +548,7 @@ startIndex=103, endIndex=115, required=True, - validators=[validators.isInLimits(0, 999999999999)], + validators=[category2.isBetween(0, 999999999999, inclusive=True)], ), Field( item="8C", @@ -552,7 +558,7 @@ startIndex=131, endIndex=139, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="9C", @@ -562,7 +568,7 @@ startIndex=155, endIndex=163, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="10C", @@ -572,7 +578,7 @@ startIndex=179, endIndex=187, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="11C", @@ -582,7 +588,7 @@ startIndex=203, endIndex=211, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="12C", @@ -592,7 +598,7 @@ startIndex=227, endIndex=235, required=True, - validators=[validators.isInLimits(0, 99999999)], + 
validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="13C", @@ -602,7 +608,7 @@ startIndex=251, endIndex=259, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="14C", @@ -612,7 +618,7 @@ startIndex=275, endIndex=283, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="15C", @@ -622,7 +628,7 @@ startIndex=299, endIndex=307, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="16C", @@ -632,7 +638,7 @@ startIndex=323, endIndex=331, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="17C", @@ -642,7 +648,7 @@ startIndex=347, endIndex=355, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="18C", @@ -652,7 +658,7 @@ startIndex=371, endIndex=379, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t7.py b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t7.py index 7916a2b8c..9fa960ab1 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t7.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tanf/t7.py @@ -3,7 +3,7 @@ from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager from tdpservice.parsers.transforms import calendar_quarter_to_rpt_month_year -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2 from tdpservice.search_indexes.documents.tanf import TANF_T7DataSubmissionDocument schemas = [] @@ 
-23,11 +23,11 @@ document=TANF_T7DataSubmissionDocument(), quiet_preparser_errors=i > 1, preparsing_validators=[ - validators.recordHasLength(247), - validators.notEmpty(0, 7), - validators.notEmpty(validator_index, validator_index + 24), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(247), + category1.recordIsNotEmpty(0, 7), + category1.recordIsNotEmpty(validator_index, validator_index + 24), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[], fields=[ @@ -50,8 +50,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid(), + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), ], ), TransformField( @@ -64,8 +64,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -76,7 +76,7 @@ startIndex=section_ind_index, endIndex=section_ind_index + 1, required=True, - validators=[validators.oneOf(["1", "2"])], + validators=[category2.isOneOf(["1", "2"])], ), Field( item="5", @@ -86,7 +86,7 @@ startIndex=stratum_index, endIndex=stratum_index + 2, required=True, - validators=[validators.isInStringRange(1, 99)], + validators=[category2.isBetween(1, 99, inclusive=True, cast=int)], ), Field( item=families_value_item_number, @@ -96,7 +96,7 @@ startIndex=families_index, endIndex=families_index + 7, required=True, - validators=[validators.isInLimits(0, 9999999)], + validators=[category2.isBetween(0, 9999999, inclusive=True)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/trailer.py b/tdrs-backend/tdpservice/parsers/schema_defs/trailer.py index c9aa92cfe..9fbe02398 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/trailer.py +++ 
b/tdrs-backend/tdpservice/parsers/schema_defs/trailer.py @@ -3,16 +3,17 @@ from ..fields import Field from ..row_schema import RowSchema -from .. import validators +from tdpservice.parsers.validators import category1, category2 trailer = RowSchema( record_type="TRAILER", document=None, preparsing_validators=[ - validators.recordHasLength(23), - validators.startsWith("TRAILER", - lambda value: f"Your file does not end with a {value} record."), + category1.recordHasLength(23), + category1.recordStartsWith( + "TRAILER", lambda _: "Your file does not end with a TRAILER record." + ), ], postparsing_validators=[], fields=[ @@ -25,7 +26,7 @@ endIndex=7, required=True, validators=[ - validators.matches('TRAILER') + category2.isEqual('TRAILER') ] ), Field( @@ -37,7 +38,7 @@ endIndex=14, required=True, validators=[ - validators.between(0, 9999999) + category2.isBetween(0, 9999999, inclusive=True) ] ), Field( @@ -49,7 +50,7 @@ endIndex=23, required=False, validators=[ - validators.matches(' ') + category2.isEqual(' ') ] ), ], diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t1.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t1.py index 54b1414bd..6b84eaa28 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t1.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t1.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T1DataSubmissionDocument from tdpservice.parsers.util import generate_t1_t4_hashes, get_t1_t4_partial_hash_members @@ -16,105 +16,105 @@ generate_hashes_func=generate_t1_t4_hashes, get_partial_hash_members_func=get_t1_t4_partial_hash_members, preparsing_validators=[ - 
validators.recordHasLengthBetween(117, 122), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLengthBetween(117, 122), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="CASH_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="CC_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="CHILDREN_COVERED", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="CC_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="CC_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="TRANSP_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="TRANSP_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="TRANSITION_SERVICES_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="TRANSITION_NBR_MONTHS", 
- result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_AMOUNT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="OTHER_NBR_MONTHS", - result_function=validators.isLargerThan(0), + result_function=category3.isGreaterThan(0), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="WORK_REQ_SANCTION", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="FAMILY_SANC_ADULT", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="SANC_TEEN_PARENT", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="NON_COOPERATION_CSE", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="FAILURE_TO_COMPLY", - 
result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="SANC_REDUCTION_AMT", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="OTHER_SANCTION", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_TOTAL_REDUCTIONS", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="FAMILY_CAP", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_TOTAL_REDUCTIONS", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="REDUCTIONS_ON_RECEIPTS", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="OTHER_TOTAL_REDUCTIONS", - condition_function=validators.isLargerThan(0), + condition_function=category3.isGreaterThan(0), result_field_name="OTHER_NON_SANCTION", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.sumIsLarger( + category3.sumIsLarger( ( "AMT_FOOD_STAMP_ASSISTANCE", "AMT_SUB_CC", @@ -147,8 +147,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), ], ), Field( @@ -159,7 +159,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), TransformField( zero_pad(3), @@ -170,7 +170,7 @@ startIndex=19, endIndex=22, required=False, - validators=[validators.isNumber()], + 
validators=[category2.isNumber()], ), Field( item="5", @@ -181,7 +181,7 @@ endIndex=24, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -193,7 +193,7 @@ endIndex=29, required=True, validators=[ - validators.isNumber(), + category2.isNumber(), ], ), Field( @@ -205,7 +205,7 @@ endIndex=30, required=True, validators=[ - validators.isInLimits(1, 2), + category2.isBetween(1, 2, inclusive=True), ], ), Field( @@ -217,7 +217,7 @@ endIndex=31, required=True, validators=[ - validators.matches(1), + category2.isEqual(1), ], ), Field( @@ -229,7 +229,7 @@ endIndex=32, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -241,7 +241,7 @@ endIndex=34, required=True, validators=[ - validators.isInLimits(1, 99), + category2.isBetween(1, 99, inclusive=True), ], ), Field( @@ -253,7 +253,7 @@ endIndex=35, required=True, validators=[ - validators.isInLimits(1, 3), + category2.isBetween(1, 3, inclusive=True), ], ), Field( @@ -265,7 +265,7 @@ endIndex=36, required=True, validators=[ - validators.isInLimits(1, 3), + category2.isBetween(1, 3, inclusive=True), ], ), Field( @@ -277,7 +277,7 @@ endIndex=37, required=True, validators=[ - validators.isInLimits(1, 2), + category2.isBetween(1, 2, inclusive=True), ], ), Field( @@ -289,7 +289,7 @@ endIndex=38, required=False, validators=[ - validators.isInLimits(0, 2), + category2.isBetween(0, 2, inclusive=True), ], ), Field( @@ -301,7 +301,7 @@ endIndex=42, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -313,7 +313,7 @@ endIndex=43, required=False, validators=[ - validators.isInLimits(0, 3), + category2.isBetween(0, 3, inclusive=True), ], ), Field( @@ -325,7 +325,7 @@ endIndex=47, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -337,7 +337,7 @@ endIndex=51, 
required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -349,7 +349,7 @@ endIndex=55, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -361,7 +361,7 @@ endIndex=59, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -373,7 +373,7 @@ endIndex=62, required=True, validators=[ - validators.isInLimits(0, 999), + category2.isBetween(0, 999, inclusive=True), ], ), Field( @@ -385,7 +385,7 @@ endIndex=66, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -397,7 +397,7 @@ endIndex=68, required=True, validators=[ - validators.isInLimits(0, 99), + category2.isBetween(0, 99, inclusive=True), ], ), Field( @@ -409,7 +409,7 @@ endIndex=71, required=True, validators=[ - validators.isInLimits(0, 999), + category2.isBetween(0, 999, inclusive=True), ], ), Field( @@ -421,7 +421,7 @@ endIndex=75, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -433,7 +433,7 @@ endIndex=78, required=True, validators=[ - validators.isInLimits(0, 999), + category2.isBetween(0, 999, inclusive=True), ], ), Field( @@ -445,7 +445,7 @@ endIndex=82, required=False, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -457,7 +457,7 @@ endIndex=85, required=False, validators=[ - validators.isInLimits(0, 999), + category2.isBetween(0, 999, inclusive=True), ], ), Field( @@ -469,7 +469,7 @@ endIndex=89, required=False, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -481,7 +481,7 @@ endIndex=92, required=False, validators=[ - validators.isInLimits(0, 999), + category2.isBetween(0, 999, inclusive=True), ], ), Field( @@ -493,7 
+493,7 @@ endIndex=96, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -505,7 +505,7 @@ endIndex=97, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -517,7 +517,7 @@ endIndex=98, required=False, validators=[ - validators.oneOf([0, 1, 2]), + category2.isOneOf([0, 1, 2]), ], ), Field( @@ -529,7 +529,7 @@ endIndex=99, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -541,7 +541,7 @@ endIndex=100, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -553,7 +553,7 @@ endIndex=101, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -565,7 +565,7 @@ endIndex=102, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -577,7 +577,7 @@ endIndex=106, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -589,7 +589,7 @@ endIndex=110, required=True, validators=[ - validators.isInLimits(0, 9999), + category2.isBetween(0, 9999, inclusive=True), ], ), Field( @@ -601,7 +601,7 @@ endIndex=111, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -613,7 +613,7 @@ endIndex=112, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -625,7 +625,7 @@ endIndex=113, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -636,7 +636,7 @@ startIndex=113, endIndex=114, required=False, - validators=[validators.isInStringRange(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True, cast=int)], ), Field( item="28", @@ -646,7 +646,7 @@ startIndex=114, endIndex=116, required=True, - validators=[validators.isInLimits(0, 9)], + 
validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="29", @@ -657,7 +657,7 @@ endIndex=117, required=False, validators=[ - validators.oneOf([0, 1, 2]), + category2.isOneOf([0, 1, 2]), ], ), Field( diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t2.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t2.py index a067b58ce..14ce84df7 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t2.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t2.py @@ -4,7 +4,7 @@ from tdpservice.parsers.transforms import tanf_ssn_decryption_func, zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T2DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,107 +18,107 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {3, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.recordHasLength(122), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLength(122), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.validate__FAM_AFF__SSN(), - validators.if_then_validator( + category3.validate__FAM_AFF__SSN(), + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - 
result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - 
condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="MARITAL_STATUS", - result_function=validators.isInLimits(1, 5), + result_function=category3.isBetween(1, 5, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 2), + condition_function=category3.isBetween(1, 2, inclusive=True), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EDUCATION_LEVEL", - result_function=validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ), + result_function=category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.matches(1), + result_function=category3.isEqual(1), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + 
condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="COOPERATION_CHILD_SUPPORT", - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EMPLOYMENT_STATUS", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="WORK_PART_STATUS", - result_function=validators.or_validators( - validators.isInStringRange(1, 3), - validators.isInStringRange(5, 9), - validators.isInStringRange(11, 19), - validators.matches("99"), - ), + result_function=category3.orValidators([ + category3.isBetween(1, 3, inclusive=True, cast=int), + category3.isBetween(5, 9, inclusive=True, cast=int), + category3.isBetween(11, 19, inclusive=True, cast=int), + category3.isEqual("99"), + ], if_result=True), ), ], fields=[ @@ -141,8 +141,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), ], ), Field( @@ -153,7 +153,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="30", @@ -163,7 +163,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.oneOf([1, 2, 3, 5])], + validators=[category2.isOneOf([1, 2, 3, 5])], ), Field( item="31", @@ -173,7 +173,7 @@ startIndex=20, endIndex=21, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="32", @@ -183,10 
+183,10 @@ startIndex=21, endIndex=29, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ] ), TransformField( @@ -198,7 +198,7 @@ startIndex=29, endIndex=38, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -209,7 +209,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34B", @@ -219,7 +219,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34C", @@ -229,7 +229,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34D", @@ -239,7 +239,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34E", @@ -249,7 +249,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="34F", @@ -259,7 +259,7 @@ startIndex=43, endIndex=44, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="35", @@ -270,7 +270,7 @@ endIndex=45, required=False, validators=[ - validators.isLargerThanOrEqualTo(0), + category2.isGreaterThan(0, inclusive=True), ], ), Field( @@ -281,7 +281,7 @@ startIndex=45, endIndex=46, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="36B", @@ -291,7 
+291,7 @@ startIndex=46, endIndex=47, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="36C", @@ -302,9 +302,10 @@ endIndex=48, required=True, validators=[ - validators.or_validators( - validators.oneOf(["1", "2"]), validators.isBlank() - ) + category3.orValidators([ + category3.isOneOf(["1", "2"]), + category3.isBlank() + ]) ], ), Field( @@ -316,7 +317,7 @@ endIndex=49, required=False, validators=[ - validators.isInLimits(0, 2), + category2.isBetween(0, 2, inclusive=True), ], ), Field( @@ -328,7 +329,7 @@ endIndex=50, required=True, validators=[ - validators.oneOf([1, 2]), + category2.isOneOf([1, 2]), ], ), Field( @@ -340,7 +341,7 @@ endIndex=51, required=False, validators=[ - validators.isInLimits(0, 5), + category2.isBetween(0, 5, inclusive=True), ], ), Field( @@ -352,7 +353,7 @@ endIndex=53, required=True, validators=[ - validators.isInStringRange(1, 10), + category2.isBetween(1, 10, inclusive=True, cast=int), ], ), Field( @@ -364,7 +365,7 @@ endIndex=54, required=False, validators=[ - validators.isInLimits(0, 3), + category2.isBetween(0, 3, inclusive=True), ], ), Field( @@ -376,7 +377,7 @@ endIndex=55, required=False, validators=[ - validators.isInLimits(0, 2), + category2.isBetween(0, 2, inclusive=True), ], ), Field( @@ -388,10 +389,10 @@ endIndex=57, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]) ], ), Field( @@ -402,7 +403,7 @@ startIndex=57, endIndex=58, required=False, - validators=[validators.oneOf([0, 1, 2, 9])], + validators=[category2.isOneOf([0, 1, 2, 9])], ), Field( item="43", @@ -413,7 +414,7 @@ endIndex=59, required=False, validators=[ - validators.oneOf([0, 1, 2, 9]), + category2.isOneOf([0, 1, 2, 9]), ], ), Field( @@ -425,7 +426,7 @@ endIndex=62, 
required=False, validators=[ - validators.isInStringRange(0, 999), + category2.isBetween(0, 999, inclusive=True, cast=int), ], ), Field( @@ -437,7 +438,7 @@ endIndex=64, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -449,7 +450,7 @@ endIndex=65, required=False, validators=[ - validators.isInLimits(0, 2), + category2.isBetween(0, 2, inclusive=True), ], ), Field( @@ -461,7 +462,7 @@ endIndex=66, required=False, validators=[ - validators.isInLimits(0, 3), + category2.isBetween(0, 3, inclusive=True), ], ), Field( @@ -473,12 +474,12 @@ endIndex=68, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(0, 3), - validators.isInStringRange(5, 9), - validators.isInStringRange(11, 19), - validators.matches("99"), - ) + category3.orValidators([ + category3.isBetween(0, 3, inclusive=True, cast=int), + category3.isBetween(5, 9, inclusive=True, cast=int), + category3.isBetween(11, 19, inclusive=True, cast=int), + category3.isEqual("99"), + ]) ], ), Field( @@ -490,7 +491,7 @@ endIndex=70, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -502,7 +503,7 @@ endIndex=72, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -514,7 +515,7 @@ endIndex=74, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -526,7 +527,7 @@ endIndex=76, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -538,7 +539,7 @@ endIndex=78, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -550,7 +551,7 @@ endIndex=80, required=False, validators=[ - 
validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -562,7 +563,7 @@ endIndex=82, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -574,7 +575,7 @@ endIndex=84, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -586,7 +587,7 @@ endIndex=86, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -598,7 +599,7 @@ endIndex=88, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -610,7 +611,7 @@ endIndex=90, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -622,7 +623,7 @@ endIndex=92, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), TransformField( @@ -635,7 +636,7 @@ endIndex=94, required=False, validators=[ - validators.matches("00"), + category2.isEqual("00"), ], ), Field( @@ -647,7 +648,7 @@ endIndex=96, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -659,7 +660,7 @@ endIndex=98, required=False, validators=[ - validators.isInStringRange(0, 99), + category2.isBetween(0, 99, inclusive=True, cast=int), ], ), Field( @@ -671,7 +672,7 @@ endIndex=102, required=False, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -683,7 +684,7 @@ endIndex=106, required=False, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -695,7 +696,7 @@ endIndex=110, required=True, validators=[ - 
validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -707,7 +708,7 @@ endIndex=114, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -719,7 +720,7 @@ endIndex=118, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( @@ -731,7 +732,7 @@ endIndex=122, required=True, validators=[ - validators.isInStringRange(0, 9999), + category2.isBetween(0, 9999, inclusive=True, cast=int), ], ), Field( diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t3.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t3.py index 9f67894f2..eb8cf342f 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t3.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t3.py @@ -4,7 +4,8 @@ from tdpservice.parsers.transforms import tanf_ssn_decryption_func from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 +from tdpservice.parsers.validators.util import is_quiet_preparser_errors from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T3DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,85 +19,85 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {2, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.t3_m3_child_validator(FIRST_CHILD), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.t3_m3_child_validator(FIRST_CHILD), + 
category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - 
condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RELATIONSHIP_HOH", - result_function=validators.isInStringRange(4, 9), + result_function=category3.isBetween(4, 9, inclusive=True, cast=int), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.oneOf((2, 3)), + result_function=category3.isOneOf((2, 3)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="EDUCATION_LEVEL", - result_function=validators.notMatches("99"), + result_function=category3.isNotEqual("99"), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - 
condition_function=validators.matches(2), + condition_function=category3.isEqual(2), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), ], fields=[ @@ -128,7 +129,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="66", @@ -138,7 +139,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.oneOf([1, 2, 4])], + validators=[category2.isOneOf([1, 2, 4])], ), Field( item="67", @@ -148,10 +149,10 @@ startIndex=20, endIndex=28, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ] ), TransformField( @@ -163,7 +164,7 @@ startIndex=28, endIndex=37, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -174,7 +175,7 @@ startIndex=37, endIndex=38, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69B", @@ -184,7 +185,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69C", @@ -194,7 +195,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69D", @@ -204,7 +205,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69E", @@ -214,7 +215,7 @@ startIndex=41, endIndex=42, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69F", @@ -224,7 +225,7 @@ startIndex=42, 
endIndex=43, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70", @@ -234,7 +235,7 @@ startIndex=43, endIndex=44, required=False, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="71A", @@ -244,7 +245,7 @@ startIndex=44, endIndex=45, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="71B", @@ -254,7 +255,7 @@ startIndex=45, endIndex=46, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="72", @@ -264,7 +265,7 @@ startIndex=46, endIndex=48, required=False, - validators=[validators.isInStringRange(0, 10)], + validators=[category2.isBetween(0, 10, inclusive=True, cast=int)], ), Field( item="73", @@ -274,7 +275,7 @@ startIndex=48, endIndex=49, required=False, - validators=[validators.oneOf([0, 2, 3])], + validators=[category2.isOneOf([0, 2, 3])], ), Field( item="74", @@ -285,10 +286,10 @@ endIndex=51, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]) ], ), Field( @@ -299,7 +300,7 @@ startIndex=51, endIndex=52, required=False, - validators=[validators.oneOf([0, 1, 2, 9])], + validators=[category2.isOneOf([0, 1, 2, 9])], ), Field( item="76A", @@ -309,7 +310,7 @@ startIndex=52, endIndex=56, required=True, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="76B", @@ -319,7 +320,7 @@ startIndex=56, endIndex=60, required=True, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) @@ -330,87 +331,87 @@ 
generate_hashes_func=generate_t2_t3_t5_hashes, should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {2, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, - quiet_preparser_errors=validators.is_quiet_preparser_errors(min_length=61), + quiet_preparser_errors=is_quiet_preparser_errors(min_length=61), preparsing_validators=[ - validators.t3_m3_child_validator(SECOND_CHILD), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.t3_m3_child_validator(SECOND_CHILD), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + 
condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="RELATIONSHIP_HOH", - result_function=validators.isInStringRange(4, 9), + result_function=category3.isBetween(4, 9, inclusive=True, cast=int), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.oneOf((1, 2)), + condition_function=category3.isOneOf((1, 2)), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.oneOf((2, 3)), + result_function=category3.isOneOf((2, 3)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - 
condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="EDUCATION_LEVEL", - result_function=validators.notMatches("99"), + result_function=category3.isNotEqual("99"), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2)), + result_function=category3.isOneOf((1, 2)), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(2), + condition_function=category3.isEqual(2), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.oneOf((1, 2, 9)), + result_function=category3.isOneOf((1, 2, 9)), ), ], fields=[ @@ -442,7 +443,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="66", @@ -452,7 +453,7 @@ startIndex=60, endIndex=61, required=True, - validators=[validators.oneOf([1, 2, 4])], + validators=[category2.isOneOf([1, 2, 4])], ), Field( item="67", @@ -462,10 +463,10 @@ startIndex=61, endIndex=69, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ] ), TransformField( @@ -477,7 +478,7 @@ startIndex=69, endIndex=78, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -488,7 +489,7 @@ startIndex=78, endIndex=79, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69B", @@ -498,7 +499,7 @@ startIndex=79, endIndex=80, required=False, - 
validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69C", @@ -508,7 +509,7 @@ startIndex=80, endIndex=81, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69D", @@ -518,7 +519,7 @@ startIndex=81, endIndex=82, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69E", @@ -528,7 +529,7 @@ startIndex=82, endIndex=83, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="69F", @@ -538,7 +539,7 @@ startIndex=83, endIndex=84, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="70", @@ -548,7 +549,7 @@ startIndex=84, endIndex=85, required=False, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="71A", @@ -558,7 +559,7 @@ startIndex=85, endIndex=86, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="71B", @@ -568,7 +569,7 @@ startIndex=86, endIndex=87, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="72", @@ -578,7 +579,7 @@ startIndex=87, endIndex=89, required=False, - validators=[validators.isInStringRange(0, 10)], + validators=[category2.isBetween(0, 10, inclusive=True, cast=int)], ), Field( item="73", @@ -588,7 +589,7 @@ startIndex=89, endIndex=90, required=False, - validators=[validators.oneOf([0, 2, 3])], + validators=[category2.isOneOf([0, 2, 3])], ), Field( item="74", @@ -599,9 +600,10 @@ endIndex=92, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), validators.oneOf(["98", "99"]) - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isOneOf(["98", "99"]) + ]) ], ), Field( @@ -612,7 +614,7 @@ 
startIndex=92, endIndex=93, required=False, - validators=[validators.oneOf([0, 1, 2, 9])], + validators=[category2.isOneOf([0, 1, 2, 9])], ), Field( item="76A", @@ -622,7 +624,7 @@ startIndex=93, endIndex=97, required=True, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="76B", @@ -632,7 +634,7 @@ startIndex=97, endIndex=101, required=True, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t4.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t4.py index aaa2d3c30..a092c8fe3 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t4.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t4.py @@ -3,7 +3,7 @@ from tdpservice.parsers.transforms import zero_pad from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T4DataSubmissionDocument from tdpservice.parsers.util import generate_t1_t4_hashes, get_t1_t4_partial_hash_members @@ -16,11 +16,11 @@ generate_hashes_func=generate_t1_t4_hashes, get_partial_hash_members_func=get_t1_t4_partial_hash_members, preparsing_validators=[ - validators.recordHasLengthBetween(36, 71), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLengthBetween(36, 71), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[], @@ -44,8 
+44,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -56,7 +56,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), TransformField( zero_pad(3), @@ -67,7 +67,7 @@ startIndex=19, endIndex=22, required=False, - validators=[validators.isNumber()], + validators=[category2.isNumber()], ), Field( item="5", @@ -77,7 +77,7 @@ startIndex=22, endIndex=24, required=False, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item="7", @@ -87,7 +87,7 @@ startIndex=24, endIndex=29, required=True, - validators=[validators.isInStringRange(0, 99999)], + validators=[category2.isBetween(0, 99999, inclusive=True, cast=int)], ), Field( item="8", @@ -97,7 +97,7 @@ startIndex=29, endIndex=30, required=True, - validators=[validators.oneOf([1, 2])], + validators=[category2.isOneOf([1, 2])], ), Field( item="9", @@ -108,9 +108,10 @@ endIndex=32, required=True, validators=[ - validators.or_validators( - validators.isInStringRange(1, 18), validators.matches("99") - ) + category3.orValidators([ + category3.isBetween(1, 18, inclusive=True, cast=int), + category3.isEqual("99") + ]) ], ), Field( @@ -121,7 +122,7 @@ startIndex=32, endIndex=33, required=True, - validators=[validators.isInLimits(1, 3)], + validators=[category2.isBetween(1, 3, inclusive=True)], ), Field( item="11", @@ -131,7 +132,7 @@ startIndex=33, endIndex=34, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="12", @@ -141,7 +142,7 @@ startIndex=34, endIndex=35, required=True, - validators=[validators.isInLimits(1, 2)], + validators=[category2.isBetween(1, 2, inclusive=True)], ), Field( item="13", @@ -151,7 +152,7 @@ startIndex=35, endIndex=36, 
required=True, - validators=[validators.isInLimits(1, 3)], + validators=[category2.isBetween(1, 3, inclusive=True)], ), Field( item="-1", diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t5.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t5.py index 884a451b5..5fddf4bd1 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t5.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t5.py @@ -4,7 +4,7 @@ from tdpservice.parsers.transforms import tanf_ssn_decryption_func from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T5DataSubmissionDocument from tdpservice.parsers.util import generate_t2_t3_t5_hashes, get_t2_t3_t5_partial_hash_members @@ -18,90 +18,90 @@ should_skip_partial_dup_func=lambda record: record.FAMILY_AFFILIATION in {3, 4, 5}, get_partial_hash_members_func=get_t2_t3_t5_partial_hash_members, preparsing_validators=[ - validators.recordHasLength(71), - validators.caseNumberNotEmpty(8, 19), - validators.or_priority_validators([ - validators.field_year_month_with_header_year_quarter(), - validators.validateRptMonthYear(), + category1.recordHasLength(71), + category1.caseNumberNotEmpty(8, 19), + category1.or_priority_validators([ + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.validateRptMonthYear(), ]), ], postparsing_validators=[ - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="SSN", - result_function=validators.validateSSN(), + result_function=category3.validateSSN(), ), - validators.validate__FAM_AFF__SSN(), - validators.if_then_validator( + 
category3.validate__FAM_AFF__SSN(), + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HISPANIC", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_AMER_INDIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_ASIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_BLACK", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="RACE_HAWAIIAN", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, 
inclusive=True), result_field_name="RACE_WHITE", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="MARITAL_STATUS", - result_function=validators.isInLimits(1, 5), + result_function=category3.isBetween(1, 5, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 2), + condition_function=category3.isBetween(1, 2, inclusive=True), result_field_name="PARENT_MINOR_CHILD", - result_function=validators.isInLimits(1, 3), + result_function=category3.isBetween(1, 3, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.isInLimits(1, 3), + condition_function=category3.isBetween(1, 3, inclusive=True), result_field_name="EDUCATION_LEVEL", - result_function=validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99), - ), + result_function=category3.orValidators([ + category3.isBetween(1, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ], if_result=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), result_field_name="CITIZENSHIP_STATUS", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), - validators.if_then_validator( + category3.ifThenAlso( condition_field_name="FAMILY_AFFILIATION", - condition_function=validators.matches(1), + condition_function=category3.isEqual(1), 
result_field_name="REC_FEDERAL_DISABILITY", - result_function=validators.isInLimits(1, 2), + result_function=category3.isBetween(1, 2, inclusive=True), ), ], fields=[ @@ -124,8 +124,8 @@ endIndex=8, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -136,7 +136,7 @@ startIndex=8, endIndex=19, required=True, - validators=[validators.notEmpty()], + validators=[category2.isNotEmpty()], ), Field( item="14", @@ -146,7 +146,7 @@ startIndex=19, endIndex=20, required=True, - validators=[validators.isInLimits(1, 5)], + validators=[category2.isBetween(1, 5, inclusive=True)], ), Field( item="15", @@ -156,10 +156,10 @@ startIndex=20, endIndex=28, required=True, - validators=[validators.intHasLength(8), - validators.dateYearIsLargerThan(1900), - validators.dateMonthIsValid(), - validators.dateDayIsValid() + validators=[category2.intHasLength(8), + category2.dateYearIsLargerThan(1900), + category2.dateMonthIsValid(), + category2.dateDayIsValid() ], ), TransformField( @@ -171,7 +171,7 @@ startIndex=28, endIndex=37, required=True, - validators=[validators.isNumber()], + validators=[category2.isNumber()], is_encrypted=False, ), Field( @@ -182,7 +182,7 @@ startIndex=37, endIndex=38, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17B", @@ -192,7 +192,7 @@ startIndex=38, endIndex=39, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17C", @@ -202,7 +202,7 @@ startIndex=39, endIndex=40, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17D", @@ -212,7 +212,7 @@ startIndex=40, endIndex=41, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17E", @@ -222,7 +222,7 @@ startIndex=41, endIndex=42, 
required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="17F", @@ -232,7 +232,7 @@ startIndex=42, endIndex=43, required=False, - validators=[validators.validateRace()], + validators=[category2.validateRace()], ), Field( item="18", @@ -242,7 +242,7 @@ startIndex=43, endIndex=44, required=False, - validators=[validators.isInLimits(0, 9)], + validators=[category2.isBetween(0, 9, inclusive=True)], ), Field( item="19A", @@ -252,7 +252,7 @@ startIndex=44, endIndex=45, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19B", @@ -262,7 +262,7 @@ startIndex=45, endIndex=46, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19C", @@ -272,7 +272,7 @@ startIndex=46, endIndex=47, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19D", @@ -282,7 +282,7 @@ startIndex=47, endIndex=48, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="19E", @@ -292,7 +292,7 @@ startIndex=48, endIndex=49, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="20", @@ -302,7 +302,7 @@ startIndex=49, endIndex=50, required=False, - validators=[validators.isInLimits(0, 5)], + validators=[category2.isBetween(0, 5, inclusive=True)], ), Field( item="21", @@ -312,7 +312,7 @@ startIndex=50, endIndex=52, required=True, - validators=[validators.isInStringRange(1, 10)], + validators=[category2.isBetween(1, 10, inclusive=True, cast=int)], ), Field( item="22", @@ -322,7 +322,7 @@ startIndex=52, endIndex=53, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="23", @@ 
-332,7 +332,7 @@ startIndex=53, endIndex=54, required=False, - validators=[validators.isInLimits(0, 2)], + validators=[category2.isBetween(0, 2, inclusive=True)], ), Field( item="24", @@ -343,10 +343,10 @@ endIndex=56, required=False, validators=[ - validators.or_validators( - validators.isInStringRange(0, 16), - validators.isInStringRange(98, 99), - ) + category3.orValidators([ + category3.isBetween(0, 16, inclusive=True, cast=int), + category3.isBetween(98, 99, inclusive=True, cast=int), + ]) ], ), Field( @@ -358,9 +358,10 @@ endIndex=57, required=False, validators=[ - validators.or_validators( - validators.isInLimits(0, 2), validators.matches(9) - ) + category3.orValidators([ + category3.isBetween(0, 2, inclusive=True), + category3.isEqual(9) + ]) ], ), Field( @@ -371,7 +372,7 @@ startIndex=57, endIndex=60, required=False, - validators=[validators.isInStringRange(0, 999)], + validators=[category2.isBetween(0, 999, inclusive=True, cast=int)], ), Field( item="27", @@ -381,7 +382,7 @@ startIndex=60, endIndex=62, required=False, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item="28", @@ -391,7 +392,7 @@ startIndex=62, endIndex=63, required=False, - validators=[validators.isInLimits(0, 3)], + validators=[category2.isBetween(0, 3, inclusive=True)], ), Field( item="29", @@ -401,7 +402,7 @@ startIndex=63, endIndex=67, required=True, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), Field( item="30", @@ -411,7 +412,7 @@ startIndex=67, endIndex=71, required=True, - validators=[validators.isInStringRange(0, 9999)], + validators=[category2.isBetween(0, 9999, inclusive=True, cast=int)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t6.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t6.py index a85ca325e..9d4e8a4ac 100644 --- 
a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t6.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t6.py @@ -4,23 +4,23 @@ from tdpservice.parsers.transforms import calendar_quarter_to_rpt_month_year from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2, category3 from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T6DataSubmissionDocument s1 = RowSchema( record_type="T6", document=Tribal_TANF_T6DataSubmissionDocument(), preparsing_validators=[ - validators.recordHasLength(379), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(379), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]), - validators.sumIsEqual( + category3.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]), + category3.sumIsEqual( "NUM_FAMILIES", ["NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS"] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", ["NUM_ADULT_RECIPIENTS", "NUM_CHILD_RECIPIENTS"] ), ], @@ -44,8 +44,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid(), + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), ], ), TransformField( @@ -58,8 +58,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -70,7 +70,7 @@ startIndex=7, endIndex=15, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( 
item="5A", @@ -80,7 +80,7 @@ startIndex=31, endIndex=39, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="6A", @@ -90,7 +90,7 @@ startIndex=55, endIndex=63, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="7A", @@ -100,7 +100,7 @@ startIndex=79, endIndex=91, required=True, - validators=[validators.isInLimits(0, 999999999999)], + validators=[category2.isBetween(0, 999999999999, inclusive=True)], ), Field( item="8A", @@ -110,7 +110,7 @@ startIndex=115, endIndex=123, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="9A", @@ -120,7 +120,7 @@ startIndex=139, endIndex=147, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="10A", @@ -130,7 +130,7 @@ startIndex=163, endIndex=171, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="11A", @@ -140,7 +140,7 @@ startIndex=187, endIndex=195, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="12A", @@ -150,7 +150,7 @@ startIndex=211, endIndex=219, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="13A", @@ -160,7 +160,7 @@ startIndex=235, endIndex=243, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="14A", @@ -170,7 +170,7 @@ startIndex=259, endIndex=267, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, 
inclusive=True)], ), Field( item="15A", @@ -180,7 +180,7 @@ startIndex=283, endIndex=291, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="16A", @@ -190,7 +190,7 @@ startIndex=307, endIndex=315, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="17A", @@ -200,7 +200,7 @@ startIndex=331, endIndex=339, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="18A", @@ -210,7 +210,7 @@ startIndex=355, endIndex=363, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), ], ) @@ -220,16 +220,16 @@ document=Tribal_TANF_T6DataSubmissionDocument(), quiet_preparser_errors=True, preparsing_validators=[ - validators.recordHasLength(379), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(379), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]), - validators.sumIsEqual( + category3.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]), + category3.sumIsEqual( "NUM_FAMILIES", ["NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS"] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", ["NUM_ADULT_RECIPIENTS", "NUM_CHILD_RECIPIENTS"] ), ], @@ -252,7 +252,10 @@ startIndex=2, endIndex=7, required=True, - validators=[], + validators=[ + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), + ], ), TransformField( calendar_quarter_to_rpt_month_year(1), @@ -273,7 +276,7 @@ startIndex=15, endIndex=23, required=True, - validators=[validators.isInLimits(0, 
99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="5B", @@ -283,7 +286,7 @@ startIndex=39, endIndex=47, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="6B", @@ -293,7 +296,7 @@ startIndex=63, endIndex=71, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="7B", @@ -303,7 +306,7 @@ startIndex=91, endIndex=103, required=True, - validators=[validators.isInLimits(0, 999999999999)], + validators=[category2.isBetween(0, 999999999999, inclusive=True)], ), Field( item="8B", @@ -313,7 +316,7 @@ startIndex=123, endIndex=131, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="9B", @@ -323,7 +326,7 @@ startIndex=147, endIndex=155, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="10B", @@ -333,7 +336,7 @@ startIndex=171, endIndex=179, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="11B", @@ -343,7 +346,7 @@ startIndex=195, endIndex=203, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="12B", @@ -353,7 +356,7 @@ startIndex=219, endIndex=227, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="13B", @@ -363,7 +366,7 @@ startIndex=243, endIndex=251, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="14B", @@ -373,7 +376,7 @@ startIndex=267, endIndex=275, required=True, - 
validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="15B", @@ -383,7 +386,7 @@ startIndex=291, endIndex=299, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="16B", @@ -393,7 +396,7 @@ startIndex=315, endIndex=323, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="17B", @@ -403,7 +406,7 @@ startIndex=339, endIndex=347, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="18B", @@ -413,7 +416,7 @@ startIndex=363, endIndex=371, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), ], ) @@ -423,16 +426,16 @@ document=Tribal_TANF_T6DataSubmissionDocument(), quiet_preparser_errors=True, preparsing_validators=[ - validators.recordHasLength(379), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(379), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[ - validators.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]), - validators.sumIsEqual( + category3.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]), + category3.sumIsEqual( "NUM_FAMILIES", ["NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS"] ), - validators.sumIsEqual( + category3.sumIsEqual( "NUM_RECIPIENTS", ["NUM_ADULT_RECIPIENTS", "NUM_CHILD_RECIPIENTS"] ), ], @@ -455,7 +458,10 @@ startIndex=2, endIndex=7, required=True, - validators=[], + validators=[ + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), + ], ), TransformField( calendar_quarter_to_rpt_month_year(2), @@ -476,7 +482,7 
@@ startIndex=23, endIndex=31, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="5C", @@ -486,7 +492,7 @@ startIndex=47, endIndex=55, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="6C", @@ -496,7 +502,7 @@ startIndex=71, endIndex=79, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="7C", @@ -506,7 +512,7 @@ startIndex=103, endIndex=115, required=True, - validators=[validators.isInLimits(0, 999999999999)], + validators=[category2.isBetween(0, 999999999999, inclusive=True)], ), Field( item="8C", @@ -516,7 +522,7 @@ startIndex=131, endIndex=139, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="9C", @@ -526,7 +532,7 @@ startIndex=155, endIndex=163, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="10C", @@ -536,7 +542,7 @@ startIndex=179, endIndex=187, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="11C", @@ -546,7 +552,7 @@ startIndex=203, endIndex=211, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="12C", @@ -556,7 +562,7 @@ startIndex=227, endIndex=235, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="13C", @@ -566,7 +572,7 @@ startIndex=251, endIndex=259, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( 
item="14C", @@ -576,7 +582,7 @@ startIndex=275, endIndex=283, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="15C", @@ -586,7 +592,7 @@ startIndex=299, endIndex=307, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="16C", @@ -596,7 +602,7 @@ startIndex=323, endIndex=331, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="17C", @@ -606,7 +612,7 @@ startIndex=347, endIndex=355, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), Field( item="18C", @@ -616,7 +622,7 @@ startIndex=371, endIndex=379, required=True, - validators=[validators.isInLimits(0, 99999999)], + validators=[category2.isBetween(0, 99999999, inclusive=True)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t7.py b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t7.py index dd0e020a2..58fb9efff 100644 --- a/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t7.py +++ b/tdrs-backend/tdpservice/parsers/schema_defs/tribal_tanf/t7.py @@ -3,7 +3,7 @@ from tdpservice.parsers.fields import Field, TransformField from tdpservice.parsers.row_schema import RowSchema, SchemaManager from tdpservice.parsers.transforms import calendar_quarter_to_rpt_month_year -from tdpservice.parsers import validators +from tdpservice.parsers.validators import category1, category2 from tdpservice.search_indexes.documents.tribal import Tribal_TANF_T7DataSubmissionDocument schemas = [] @@ -23,11 +23,11 @@ document=Tribal_TANF_T7DataSubmissionDocument(), quiet_preparser_errors=i > 1, preparsing_validators=[ - validators.recordHasLength(247), - validators.notEmpty(0, 7), - validators.notEmpty(validator_index, validator_index + 
24), - validators.field_year_month_with_header_year_quarter(), - validators.calendarQuarterIsValid(2, 7), + category1.recordHasLength(247), + category1.recordIsNotEmpty(0, 7), + category1.recordIsNotEmpty(validator_index, validator_index + 24), + category1.validate_fieldYearMonth_with_headerYearQuarter(), + category1.calendarQuarterIsValid(2, 7), ], postparsing_validators=[], fields=[ @@ -50,8 +50,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(2020), - validators.quarterIsValid(), + category2.dateYearIsLargerThan(2019), + category2.quarterIsValid(), ], ), TransformField( @@ -64,8 +64,8 @@ endIndex=7, required=True, validators=[ - validators.dateYearIsLargerThan(1998), - validators.dateMonthIsValid(), + category2.dateYearIsLargerThan(1998), + category2.dateMonthIsValid(), ], ), Field( @@ -76,7 +76,7 @@ startIndex=section_ind_index, endIndex=section_ind_index + 1, required=True, - validators=[validators.oneOf(["1", "2"])], + validators=[category2.isOneOf(["1", "2"])], ), Field( item="5", @@ -86,7 +86,7 @@ startIndex=stratum_index, endIndex=stratum_index + 2, required=True, - validators=[validators.isInStringRange(0, 99)], + validators=[category2.isBetween(0, 99, inclusive=True, cast=int)], ), Field( item=families_value_item_number, @@ -96,7 +96,7 @@ startIndex=families_index, endIndex=families_index + 7, required=True, - validators=[validators.isInLimits(0, 9999999)], + validators=[category2.isBetween(0, 9999999, inclusive=True)], ), ], ) diff --git a/tdrs-backend/tdpservice/parsers/schema_defs/util.py b/tdrs-backend/tdpservice/parsers/schema_defs/util.py deleted file mode 100644 index 5e7d3f9d2..000000000 --- a/tdrs-backend/tdpservice/parsers/schema_defs/util.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Utility functions for schema definitions.""" - -from .. 
import schema_defs -from tdpservice.data_files.models import DataFile - -import logging - -logger = logging.getLogger(__name__) - -def get_schema_options(program, section, query=None, model=None, model_name=None): - """Centralized function to return the appropriate schema for a given program, section, and query. - - TODO: need to rework this docstring as it is outdated hence the weird ';;' for some of them. - - @param program: the abbreviated program type (.e.g, 'TAN') - @param section: the section of the file (.e.g, 'A');; or ACTIVE_CASE_DATA - @param query: the query for section_names (.e.g, 'section', 'models', etc.) - @return: the appropriate references (e.g., ACTIVE_CASE_DATA or {t1,t2,t3}) ;; returning 'A' - """ - schema_options = { - 'TAN': { - 'A': { - 'section': DataFile.Section.ACTIVE_CASE_DATA, - 'models': { - 'T1': schema_defs.tanf.t1, - 'T2': schema_defs.tanf.t2, - 'T3': schema_defs.tanf.t3, - } - }, - 'C': { - 'section': DataFile.Section.CLOSED_CASE_DATA, - 'models': { - 'T4': schema_defs.tanf.t4, - 'T5': schema_defs.tanf.t5, - } - }, - 'G': { - 'section': DataFile.Section.AGGREGATE_DATA, - 'models': { - 'T6': schema_defs.tanf.t6, - } - }, - 'S': { - 'section': DataFile.Section.STRATUM_DATA, - 'models': { - 'T7': schema_defs.tanf.t7, - } - } - }, - 'SSP': { - 'A': { - 'section': DataFile.Section.SSP_ACTIVE_CASE_DATA, - 'models': { - 'M1': schema_defs.ssp.m1, - 'M2': schema_defs.ssp.m2, - 'M3': schema_defs.ssp.m3, - } - }, - 'C': { - 'section': DataFile.Section.SSP_CLOSED_CASE_DATA, - 'models': { - 'M4': schema_defs.ssp.m4, - 'M5': schema_defs.ssp.m5, - } - }, - 'G': { - 'section': DataFile.Section.SSP_AGGREGATE_DATA, - 'models': { - 'M6': schema_defs.ssp.m6, - } - }, - 'S': { - 'section': DataFile.Section.SSP_STRATUM_DATA, - 'models': { - 'M7': schema_defs.ssp.m7, - } - } - }, - 'Tribal TAN': { - 'A': { - 'section': DataFile.Section.TRIBAL_ACTIVE_CASE_DATA, - 'models': { - 'T1': schema_defs.tribal_tanf.t1, - 'T2': schema_defs.tribal_tanf.t2, - 'T3': 
schema_defs.tribal_tanf.t3, - } - }, - 'C': { - 'section': DataFile.Section.TRIBAL_CLOSED_CASE_DATA, - 'models': { - 'T4': schema_defs.tribal_tanf.t4, - 'T5': schema_defs.tribal_tanf.t5, - } - }, - 'G': { - 'section': DataFile.Section.TRIBAL_AGGREGATE_DATA, - 'models': { - 'T6': schema_defs.tribal_tanf.t6, - } - }, - 'S': { - 'section': DataFile.Section.TRIBAL_STRATUM_DATA, - 'models': { - 'T7': schema_defs.tribal_tanf.t7, - } - }, - }, - } - - if query == "text": - for prog_name, prog_dict in schema_options.items(): - for sect, val in prog_dict.items(): - if val['section'] == section: - return {'program_type': prog_name, 'section': sect} - raise ValueError("Model not found in schema_defs") - elif query == "section": - return schema_options.get(program, {}).get(section, None)[query] - elif query == "models": - links = schema_options.get(program, {}).get(section, None) - - # if query is not chosen or wrong input, return all options - # query = 'models', model = 'T1' - models = links.get(query, links) - - if model_name is None: - return models - elif model_name not in models.keys(): - logger.debug(f"Model {model_name} not found in schema_defs") - return [] # intentionally trigger the error_msg for unknown record type - else: - return models.get(model_name, models) - -def get_program_models(str_prog, str_section): - """Return the models dict for a given program and section.""" - return get_schema_options(program=str_prog, section=str_section, query='models') - -def get_program_model(str_prog, str_section, str_model): - """Return singular model for a given program, section, and name.""" - return get_schema_options(program=str_prog, section=str_section, query='models', model_name=str_model) - -def get_section_reference(str_prog, str_section): - """Return the named section reference for a given program and section.""" - return get_schema_options(program=str_prog, section=str_section, query='section') - -def get_text_from_df(df): - """Return the short-hand text for 
program, section for a given datafile.""" - return get_schema_options("", section=df.section, query='text') - -def get_schema(line, section, program_type): - """Return the appropriate schema for the line.""" - line_type = line[0:2] - return get_schema_options(program_type, section, query='models', model_name=line_type) diff --git a/tdrs-backend/tdpservice/parsers/test/conftest.py b/tdrs-backend/tdpservice/parsers/test/conftest.py index 9fab9e8d2..5145300d6 100644 --- a/tdrs-backend/tdpservice/parsers/test/conftest.py +++ b/tdrs-backend/tdpservice/parsers/test/conftest.py @@ -431,6 +431,29 @@ def m5_cat2_invalid_23_24_file(): ) return parsing_file +@pytest.fixture +def test_file_zero_filled_fips_code(): + """Fixture for T1 file with an invalid CITIZENSHIP_STATUS.""" + parsing_file = ParsingFileFactory( + year=2021, + quarter='Q1', + file__name='t3_invalid_citizenship_file.txt', + file__section='Active Case Data', + file__data=(b'HEADER20241A01000TAN2ED\n' + b'T1202401 2132333 0140951112 43312 03 0 0 2 554145' + + b' 0 0 0 0 0 0 0 0 0 0222222 0 02229 22 \n' + + b'T2202401 21323333219550117WT@TB9BT92122222222223 1329911 34' + + b' 32 699 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' + + b' 0 0 0 0 0 01623 0 0 0\n' + + b'T2202401 21323333219561102WTT@WBP992122221222222 2329911 28' + + b' 32 699 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' + + b' 0 0 0 0 0 01432 0 0 0\n' + + b'T3202401 2132333120070906WT@@#ZY@W212222122 63981 0 012' + + b'0050201WTTYT#TT0212222122 63981 0 0 \n' + + b'TRAILER 4 ') + ) + return parsing_file + @pytest.fixture def tanf_s1_exact_dup_file(): """Fixture for a section 1 file containing an exact duplicate record.""" @@ -711,21 +734,19 @@ def tanf_s4_partial_dup_file(): def partial_dup_t1_err_msg(): """Fixture for t1 record partial duplicate error.""" return ("Partial duplicate record detected with record type {record_type} at line 3. Record is a partial " - "duplicate of the record at line number 2. 
Duplicated fields causing error: Record Type, " - "Reporting Year and Month, and Case Number.") + "duplicate of the record at line number 2.") @pytest.fixture def partial_dup_t5_err_msg(): """Fixture for t5 record partial duplicate error.""" return ("Partial duplicate record detected with record type {record_type} at line 3. Record is a partial " - "duplicate of the record at line number 2. Duplicated fields causing error: Record Type, " - "Reporting Year and Month, Case Number, Family Affiliation, Date of Birth, and Social Security Number.") + "duplicate of the record at line number 2.") @pytest.fixture def partial_dup_s3_s4_err_msg(): """Fixture for t7 record partial duplicate error.""" return ("Partial duplicate record detected with record type {record_type} at line 3. Record is a partial " - "duplicate of the record at line number 2. Duplicated fields causing error: Record Type.") + "duplicate of the record at line number 2.") @pytest.fixture def cat4_edge_case_file(stt_user, stt): diff --git a/tdrs-backend/tdpservice/parsers/test/test_case_consistency.py b/tdrs-backend/tdpservice/parsers/test/test_case_consistency.py index f65093d4e..535edec15 100644 --- a/tdrs-backend/tdpservice/parsers/test/test_case_consistency.py +++ b/tdrs-backend/tdpservice/parsers/test/test_case_consistency.py @@ -47,7 +47,7 @@ def tanf_s1_schemas(self): @pytest.fixture def small_correct_file(self, stt_user, stt): """Fixture for small_correct_file.""" - return util.create_test_datafile('small_correct_file.txt', stt_user, stt) + return util.create_test_datafile("small_correct_file.txt", stt_user, stt) @pytest.fixture def small_correct_file_header(self, small_correct_file): @@ -55,7 +55,7 @@ def small_correct_file_header(self, small_correct_file): header, header_is_valid, header_errors = self.parse_header(small_correct_file) if not header_is_valid: - logger.error('Header is not valid: %s', header_errors) + logger.error("Header is not valid: %s", header_errors) return None return header @@ 
-64,7 +64,7 @@ def test_add_record(self, small_correct_file_header, small_correct_file, tanf_s1 """Test add_record logic.""" case_consistency_validator = CaseConsistencyValidator( small_correct_file_header, - small_correct_file_header['program_type'], + small_correct_file_header["program_type"], STT.EntityType.STATE, util.make_generate_case_consistency_parser_error(small_correct_file) ) @@ -80,9 +80,9 @@ def test_add_record(self, small_correct_file_header, small_correct_file, tanf_s1 assert case_consistency_validator.total_cases_cached == 0 assert case_consistency_validator.total_cases_validated == 0 - # Add record with different case number to proc validation again and start caching a new case. - t1 = factories.TanfT1Factory.create() - t1.CASE_NUMBER = '2' + # Add record with different Case Number to proc validation again and start caching a new case. + t1 = factories.TanfT1Factory.build() + t1.CASE_NUMBER = "2" t1.RPT_MONTH_YEAR = 2 line_number += 1 case_consistency_validator.add_record(t1, tanf_s1_schemas[0], str(t1), line_number, False) @@ -93,11 +93,11 @@ def test_add_record(self, small_correct_file_header, small_correct_file, tanf_s1 assert case_consistency_validator.total_cases_validated == 1 # Complete the case to proc validation and verify that it occured. Even if the next case has errors. 
- t2 = factories.TanfT2Factory.create() - t3 = factories.TanfT3Factory.create() - t2.CASE_NUMBER = '2' + t2 = factories.TanfT2Factory.build() + t3 = factories.TanfT3Factory.build() + t2.CASE_NUMBER = "2" t2.RPT_MONTH_YEAR = 2 - t3.CASE_NUMBER = '2' + t3.CASE_NUMBER = "2" t3.RPT_MONTH_YEAR = 2 line_number += 1 case_consistency_validator.add_record(t2, tanf_s1_schemas[1], str(t2), line_number, False) @@ -117,23 +117,23 @@ def test_add_record(self, small_correct_file_header, small_correct_file, tanf_s1 @pytest.mark.parametrize("header,T1Stuff,T2Stuff,T3Stuff,stt_type", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], 'T1'), - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], "T1"), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2"), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], 'T1'), - (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], 'T2'), - (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], 'T3'), + (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], "T1"), + (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], "T2"), + (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], "T3"), STT.EntityType.TRIBE, ), ( {"type": "A", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], 'M1'), - (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], 'M2'), - (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], 'M3'), + (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], "M1"), + 
(factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], "M2"), + (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], "M3"), STT.EntityType.STATE, ), ]) @@ -147,15 +147,15 @@ def test_section1_records_are_related_validator_pass( case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t1s = [ - T1Factory.create( + T1Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -164,14 +164,14 @@ def test_section1_records_are_related_validator_pass( line_number += 1 t2s = [ - T2Factory.create( + T2Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=1, ), - T2Factory.create( + T2Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), ] @@ -180,14 +180,14 @@ def test_section1_records_are_related_validator_pass( line_number += 1 t3s = [ - T3Factory.create( + T3Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=1, ), - T3Factory.create( + T3Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), ] @@ -204,23 +204,23 @@ def test_section1_records_are_related_validator_pass( @pytest.mark.parametrize("header,T1Stuff,T2Stuff,T3Stuff,stt_type", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], 'T1'), - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], "T1", "4", "6"), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2"), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "Tribal TAN", "year": 2020, 
"quarter": "4"}, - (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], 'T1'), - (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], 'T2'), - (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], 'T3'), + (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], "T1", "4", "6"), + (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], "T2"), + (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], "T3"), STT.EntityType.TRIBE, ), ( {"type": "A", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], 'M1'), - (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], 'M2'), - (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], 'M3'), + (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], "M1", "3", "5"), + (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], "M2"), + (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], "M3"), STT.EntityType.STATE, ), ]) @@ -228,21 +228,21 @@ def test_section1_records_are_related_validator_pass( def test_section1_records_are_related_validator_fail_no_t2_or_t3( self, small_correct_file, header, T1Stuff, T2Stuff, T3Stuff, stt_type): """Test records are related validator fails with no t2s or t3s.""" - (T1Factory, t1_schema, t1_model_name) = T1Stuff + (T1Factory, t1_schema, t1_model_name, rpt_item_num, case_item_num) = T1Stuff (T2Factory, t2_schema, t2_model_name) = T2Stuff (T3Factory, t3_schema, t3_model_name) = T3Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t1s = [ - T1Factory.create( + T1Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123' + CASE_NUMBER="123" ), ] line_number = 1 @@ -257,31 +257,35 @@ def test_section1_records_are_related_validator_fail_no_t2_or_t3( assert len(errors) == 1 assert num_errors == 1 
assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY + is_tribal = "Tribal" in header['program_type'] + case_num = "Case Number" + case_num += "--TANF" if is_tribal else "" assert errors[0].error_message == ( - f'Every {t1_model_name} record should have at least one corresponding ' - f'{t2_model_name} or {t3_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t1_model_name} record should have at least one corresponding " + f"{t2_model_name} or {t3_model_name} record with the same Item {rpt_item_num} " + f"(Reporting Year and Month) and Item {case_item_num} ({case_num})." ) @pytest.mark.parametrize("header,T1Stuff,T2Stuff,T3Stuff,stt_type", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], 'T1'), - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], "T1", "4", "6"), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2"), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], 'T1'), - (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], 'T2'), - (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], 'T3'), + (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], "T1", "4", "6"), + (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], "T2"), + (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], "T3"), STT.EntityType.TRIBE, ), ( {"type": "A", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], 'M1'), - (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], 'M2'), - 
(factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], 'M3'), + (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], "M1", "3", "5"), + (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], "M2"), + (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], "M3"), STT.EntityType.STATE, ), ]) @@ -289,26 +293,26 @@ def test_section1_records_are_related_validator_fail_no_t2_or_t3( def test_section1_records_are_related_validator_fail_no_t1( self, small_correct_file, header, T1Stuff, T2Stuff, T3Stuff, stt_type): """Test records are related validator fails with no t1s.""" - (T1Factory, t1_schema, t1_model_name) = T1Stuff + (T1Factory, t1_schema, t1_model_name, rpt_item_num, case_item_num) = T1Stuff (T2Factory, t2_schema, t2_model_name) = T2Stuff (T3Factory, t3_schema, t3_model_name) = T3Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t2s = [ - T2Factory.create( + T2Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=1, ), - T2Factory.create( + T2Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), ] @@ -318,14 +322,14 @@ def test_section1_records_are_related_validator_fail_no_t1( line_number += 1 t3s = [ - T3Factory.create( + T3Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=1, ), - T3Factory.create( + T3Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), ] @@ -339,46 +343,54 @@ def test_section1_records_are_related_validator_fail_no_t1( assert len(errors) == 4 assert num_errors == 4 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY + + is_tribal = "Tribal" in header['program_type'] + case_num = "Case Number" + case_num += "--TANF" if is_tribal else "" assert errors[0].error_message == ( - 
f'Every {t2_model_name} record should have at least one corresponding ' - f'{t1_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t2_model_name} record should have at least one corresponding " + f"{t1_model_name} record with the same Item {rpt_item_num} " + f"(Reporting Year and Month) and Item {case_item_num} ({case_num})." ) assert errors[1].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[1].error_message == ( - f'Every {t2_model_name} record should have at least one corresponding ' - f'{t1_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t2_model_name} record should have at least one corresponding " + f"{t1_model_name} record with the same Item {rpt_item_num} " + f"(Reporting Year and Month) and Item {case_item_num} ({case_num})." ) assert errors[2].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[2].error_message == ( - f'Every {t3_model_name} record should have at least one corresponding ' - f'{t1_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t3_model_name} record should have at least one corresponding " + f"{t1_model_name} record with the same Item {rpt_item_num} " + f"(Reporting Year and Month) and Item {case_item_num} ({case_num})." ) assert errors[3].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[3].error_message == ( - f'Every {t3_model_name} record should have at least one corresponding ' - f'{t1_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t3_model_name} record should have at least one corresponding " + f"{t1_model_name} record with the same Item {rpt_item_num} " + f"(Reporting Year and Month) and Item {case_item_num} ({case_num})." 
) @pytest.mark.parametrize("header,T1Stuff,T2Stuff,T3Stuff,stt_type", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], 'T1'), - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], "T1", "4", "6"), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2", "30"), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3", "67"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], 'T1'), - (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], 'T2'), - (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], 'T3'), + (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], "T1", "4", "6"), + (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], "T2", "30"), + (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], "T3", "66"), STT.EntityType.TRIBE, ), ( {"type": "A", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], 'M1'), - (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], 'M2'), - (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], 'M3'), + (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], "M1", "3", "5"), + (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], "M2", "26"), + (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], "M3", "60"), STT.EntityType.STATE, ), ]) @@ -386,21 +398,21 @@ def test_section1_records_are_related_validator_fail_no_t1( def test_section1_records_are_related_validator_fail_no_family_affiliation( self, small_correct_file, header, T1Stuff, T2Stuff, T3Stuff, stt_type): """Test records are related validator fails when no t2 or t3 has 
family_affiliation == 1.""" - (T1Factory, t1_schema, t1_model_name) = T1Stuff - (T2Factory, t2_schema, t2_model_name) = T2Stuff - (T3Factory, t3_schema, t3_model_name) = T3Stuff + (T1Factory, t1_schema, t1_model_name, rpt_item_num, case_item_num) = T1Stuff + (T2Factory, t2_schema, t2_model_name, t2_fam_afil_item_num) = T2Stuff + (T3Factory, t3_schema, t3_model_name, t3_fam_afil_item_num) = T3Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t1s = [ - T1Factory.create( + T1Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123' + CASE_NUMBER="123" ), ] line_number = 1 @@ -409,14 +421,14 @@ def test_section1_records_are_related_validator_fail_no_family_affiliation( line_number += 1 t2s = [ - T2Factory.create( + T2Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), - T2Factory.create( + T2Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), ] @@ -425,14 +437,14 @@ def test_section1_records_are_related_validator_fail_no_family_affiliation( line_number += 1 t3s = [ - T3Factory.create( + T3Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), - T3Factory.create( + T3Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, ), ] @@ -446,29 +458,33 @@ def test_section1_records_are_related_validator_fail_no_family_affiliation( assert len(errors) == 2 assert num_errors == 1 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY + is_tribal = "Tribal" in header['program_type'] + case_num = "Case Number" + case_num += "--TANF" if is_tribal else "" assert errors[0].error_message == ( - f'Every {t1_model_name} record should have at least one corresponding ' - f'{t2_model_name} or {t3_model_name} record with the 
same RPT_MONTH_YEAR and ' - f'CASE_NUMBER, where FAMILY_AFFILIATION==1' + f"Every {t1_model_name} record should have at least one corresponding " + f"{t2_model_name} or {t3_model_name} record with the same Item {rpt_item_num} (Reporting Year and Month) " + f"and Item {case_item_num} ({case_num}), where Item {t2_fam_afil_item_num} (Family Affiliation)==1 or " + f"Item {t3_fam_afil_item_num} (Family Affiliation)==1." ) @pytest.mark.parametrize("header,T4Stuff,T5Stuff,stt_type", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), STT.EntityType.STATE, ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), STT.EntityType.TRIBE, ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), STT.EntityType.STATE, ), ]) @@ -480,15 +496,15 @@ def test_section2_validator_pass(self, small_correct_file, header, T4Stuff, T5St case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - 
CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -497,16 +513,16 @@ def test_section2_validator_pass(self, small_correct_file, header, T4Stuff, T5St line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=3, REC_AID_TOTALLY_DISABLED=2, REC_SSI=1 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=2, REC_SSI=1 @@ -526,20 +542,20 @@ def test_section2_validator_pass(self, small_correct_file, header, T4Stuff, T5St @pytest.mark.parametrize("header,T4Stuff,T5Stuff,stt_type", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4", "4", "9"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5", "28"), STT.EntityType.STATE, ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4", "4", "9"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5", "28"), STT.EntityType.TRIBE, ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4", "3", "8"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5", "25"), STT.EntityType.STATE, ), ]) @@ -547,21 +563,21 @@ def test_section2_validator_pass(self, small_correct_file, header, T4Stuff, T5St def 
test_section2_validator_fail_case_closure_employment( self, small_correct_file, header, T4Stuff, T5Stuff, stt_type): """Test records are related validator section 2 success case.""" - (T4Factory, t4_schema, t4_model_name) = T4Stuff - (T5Factory, t5_schema, t5_model_name) = T5Stuff + (T4Factory, t4_schema, t4_model_name, rpt_item_num, closure_item_num) = T4Stuff + (T5Factory, t5_schema, t5_model_name, emp_status_item_num) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', - CLOSURE_REASON='01' + CASE_NUMBER="123", + CLOSURE_REASON="01" ), ] line_number = 1 @@ -570,17 +586,17 @@ def test_section2_validator_fail_case_closure_employment( line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=3, REC_AID_TOTALLY_DISABLED=2, REC_SSI=1, EMPLOYMENT_STATUS=3, ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=2, REC_SSI=1, @@ -599,42 +615,43 @@ def test_section2_validator_fail_case_closure_employment( assert num_errors == 1 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[0].error_message == ( - 'At least one person on the case must have employment status = 1:Yes' - ' in the same RPT_MONTH_YEAR since CLOSURE_REASON = 1:Employment/excess earnings.' + f"At least one person on the case must have Item {emp_status_item_num} (Employment Status) = 1:Yes in the " + f"same Item {rpt_item_num} (Reporting Year and Month) since Item {closure_item_num} (Reason for Closure) = " + "1:Employment/excess earnings." 
) @pytest.mark.parametrize("header,T4Stuff,T5Stuff,stt_type", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4", "9"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5", "26"), STT.EntityType.STATE, ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4", "9"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5", "26"), STT.EntityType.TRIBE, ), ]) @pytest.mark.django_db def test_section2_validator_fail_case_closure_ftl(self, small_correct_file, header, T4Stuff, T5Stuff, stt_type): """Test records are related validator section 2 success case.""" - (T4Factory, t4_schema, t4_model_name) = T4Stuff - (T5Factory, t5_schema, t5_model_name) = T5Stuff + (T4Factory, t4_schema, t4_model_name, closure_item_num) = T4Stuff + (T5Factory, t5_schema, t5_model_name, fed_time_item_num) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', - CLOSURE_REASON='03' + CASE_NUMBER="123", + CLOSURE_REASON="03" ), ] line_number = 1 @@ -643,23 +660,23 @@ def test_section2_validator_fail_case_closure_ftl(self, small_correct_file, head line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=2, REC_SSI=2, - 
RELATIONSHIP_HOH='10', - COUNTABLE_MONTH_FED_TIME='059', + RELATIONSHIP_HOH="10", + COUNTABLE_MONTH_FED_TIME="059", ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=3, REC_AID_TOTALLY_DISABLED=2, REC_SSI=2, - RELATIONSHIP_HOH='03', - COUNTABLE_MONTH_FED_TIME='001', + RELATIONSHIP_HOH="03", + COUNTABLE_MONTH_FED_TIME="001", ), ] for t5 in t5s: @@ -673,27 +690,31 @@ def test_section2_validator_fail_case_closure_ftl(self, small_correct_file, head assert len(errors) == 1 assert num_errors == 1 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY - assert errors[0].error_message == ('At least one person who is head-of-household or spouse of ' - 'head-of-household on case must have countable months toward time limit >= ' - '60 since CLOSURE_REASON = 03: federal 5 year time limit.') + is_tribal = "Tribal" in header["program_type"] + tribe_or_fed = "Tribal" if is_tribal else "Federal" + assert errors[0].error_message == ("At least one person who is head-of-household or spouse of " + f"head-of-household on case must have Item {fed_time_item_num} " + f"(Number of Months Countable Toward {tribe_or_fed} Time Limit) >= 60 since " + f"Item {closure_item_num} (Reason for Closure) = 03: federal 5 year time " + "limit.") @pytest.mark.parametrize("header,T4Stuff,T5Stuff,stt_type", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4", "4", "6"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), STT.EntityType.STATE, ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + 
(factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4", "4", "6"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), STT.EntityType.TRIBE, ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4", "3", "5"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), STT.EntityType.STATE, ), ]) @@ -701,20 +722,20 @@ def test_section2_validator_fail_case_closure_ftl(self, small_correct_file, head def test_section2_records_are_related_validator_fail_no_t5s( self, small_correct_file, header, T4Stuff, T5Stuff, stt_type): """Test records are related validator fails with no t5s.""" - (T4Factory, t4_schema, t4_model_name) = T4Stuff + (T4Factory, t4_schema, t4_model_name, rpt_item_num, case_item_num) = T4Stuff (T5Factory, t5_schema, t5_model_name) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -729,28 +750,32 @@ def test_section2_records_are_related_validator_fail_no_t5s( assert len(errors) == 1 assert num_errors == 1 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY + is_tribal = "Tribal" in header['program_type'] + case_num = "Case Number" + case_num += "--TANF" if is_tribal else "" assert errors[0].error_message == ( - f'Every {t4_model_name} record should have at least one corresponding ' - f'{t5_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' 
+ f"Every {t4_model_name} record should have at least one corresponding " + f"{t5_model_name} record with the same Item {rpt_item_num} (Reporting Year and Month) " + f"and Item {case_item_num} ({case_num})." ) @pytest.mark.parametrize("header,T4Stuff,T5Stuff,stt_type", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4", "4", "6"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), STT.EntityType.STATE, ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4", "4", "6"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), STT.EntityType.TRIBE, ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4", "3", "5"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), STT.EntityType.STATE, ), ]) @@ -758,27 +783,27 @@ def test_section2_records_are_related_validator_fail_no_t5s( def test_section2_records_are_related_validator_fail_no_t4s( self, small_correct_file, header, T4Stuff, T5Stuff, stt_type): """Test records are related validator fails with no t4s.""" - (T4Factory, t4_schema, t4_model_name) = T4Stuff + (T4Factory, t4_schema, t4_model_name, rpt_item_num, case_item_num) = T4Stuff (T5Factory, t5_schema, t5_model_name) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + 
header["program_type"], stt_type, util.make_generate_case_consistency_parser_error(small_correct_file) ) t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=3, REC_AID_TOTALLY_DISABLED=2, REC_SSI=1 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=2, REC_SSI=1 @@ -796,31 +821,36 @@ def test_section2_records_are_related_validator_fail_no_t4s( assert len(errors) == 2 assert num_errors == 2 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY + is_tribal = "Tribal" in header['program_type'] + case_num = "Case Number" + case_num += "--TANF" if is_tribal else "" assert errors[0].error_message == ( - f'Every {t5_model_name} record should have at least one corresponding ' - f'{t4_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t5_model_name} record should have at least one corresponding " + f"{t4_model_name} record with the same Item {rpt_item_num} (Reporting Year and Month) " + f"and Item {case_item_num} ({case_num})." ) assert errors[1].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[1].error_message == ( - f'Every {t5_model_name} record should have at least one corresponding ' - f'{t4_model_name} record with the same RPT_MONTH_YEAR and CASE_NUMBER.' + f"Every {t5_model_name} record should have at least one corresponding " + f"{t4_model_name} record with the same Item {rpt_item_num} (Reporting Year and Month) " + f"and Item {case_item_num} ({case_num})." 
) @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), ), ]) @pytest.mark.django_db @@ -831,15 +861,15 @@ def test_section2_aabd_ssi_validator_pass_territory_adult_aadb(self, small_corre case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], STT.EntityType.TERRITORY, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -848,17 +878,17 @@ def test_section2_aabd_ssi_validator_pass_territory_adult_aadb(self, small_corre line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=1, REC_AID_TOTALLY_DISABLED=1, REC_SSI=2 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - 
CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=2, @@ -879,37 +909,37 @@ def test_section2_aabd_ssi_validator_pass_territory_adult_aadb(self, small_corre @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5", "19C"), ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5", "19C"), ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5", "18C"), ), ]) @pytest.mark.django_db def test_section2_aabd_ssi_validator_fail_territory_adult_aabd(self, small_correct_file, header, T4Stuff, T5Stuff): """Test records are related validator section 2 success case.""" (T4Factory, t4_schema, t4_model_name) = T4Stuff - (T5Factory, t5_schema, t5_model_name) = T5Stuff + (T5Factory, t5_schema, t5_model_name, ratd_item_num) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], STT.EntityType.TERRITORY, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( 
RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -918,17 +948,17 @@ def test_section2_aabd_ssi_validator_fail_territory_adult_aabd(self, small_corre line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=1, REC_AID_TOTALLY_DISABLED=0, REC_SSI=2 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=0, @@ -947,28 +977,30 @@ def test_section2_aabd_ssi_validator_fail_territory_adult_aabd(self, small_corre assert num_errors == 2 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[0].error_message == ( - f'{t5_model_name} Adults in territories must have a valid value for REC_AID_TOTALLY_DISABLED.' + f"{t5_model_name} Adults in territories must have a valid value for Item {ratd_item_num} " + "(Received Disability Benefits: Permanently and Totally Disabled)." ) assert errors[1].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[1].error_message == ( - f'{t5_model_name} Adults in territories must have a valid value for REC_AID_TOTALLY_DISABLED.' + f"{t5_model_name} Adults in territories must have a valid value for Item {ratd_item_num} " + "(Received Disability Benefits: Permanently and Totally Disabled)." 
) @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), ), ]) @pytest.mark.django_db @@ -979,15 +1011,15 @@ def test_section2_aabd_ssi_validator_pass_territory_child_aabd(self, small_corre case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], STT.EntityType.TERRITORY, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -996,17 +1028,17 @@ def test_section2_aabd_ssi_validator_pass_territory_child_aabd(self, small_corre line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="20170209", FAMILY_AFFILIATION=1, REC_AID_TOTALLY_DISABLED=2, REC_SSI=2 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - 
CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="20170209", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=1, @@ -1027,37 +1059,37 @@ def test_section2_aabd_ssi_validator_pass_territory_child_aabd(self, small_corre @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5", "19C"), ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5", "19C"), ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5", "18C"), ), ]) @pytest.mark.django_db - def test_section2_aabd_ssi_validator_fail_state_aabd(self, small_correct_file, header, T4Stuff, T5Stuff): + def test_section2_atd_ssi_validator_fail_state_atdd(self, small_correct_file, header, T4Stuff, T5Stuff): """Test records are related validator section 2 success case.""" (T4Factory, t4_schema, t4_model_name) = T4Stuff - (T5Factory, t5_schema, t5_model_name) = T5Stuff + (T5Factory, t5_schema, t5_model_name, item_no) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], STT.EntityType.STATE, 
util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -1066,17 +1098,17 @@ def test_section2_aabd_ssi_validator_fail_state_aabd(self, small_correct_file, h line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=1, REC_SSI=2 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="20170209", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=1, @@ -1095,47 +1127,49 @@ def test_section2_aabd_ssi_validator_fail_state_aabd(self, small_correct_file, h assert num_errors == 2 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[0].error_message == ( - f'{t5_model_name} People in states should not have a value of 1 for REC_AID_TOTALLY_DISABLED.' + f"{t5_model_name} People in states should not have a value of 1 for Item {item_no} (" + "Received Disability Benefits: Permanently and Totally Disabled)." ) assert errors[1].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[1].error_message == ( - f'{t5_model_name} People in states should not have a value of 1 for REC_AID_TOTALLY_DISABLED.' + f"{t5_model_name} People in states should not have a value of 1 for Item {item_no} " + "(Received Disability Benefits: Permanently and Totally Disabled)." 
) @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5", "19E"), ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5", "19E"), ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5", "18E"), ), ]) @pytest.mark.django_db def test_section2_aabd_ssi_validator_fail_territory_ssi(self, small_correct_file, header, T4Stuff, T5Stuff): """Test records are related validator section 2 success case.""" (T4Factory, t4_schema, t4_model_name) = T4Stuff - (T5Factory, t5_schema, t5_model_name) = T5Stuff + (T5Factory, t5_schema, t5_model_name, rec_ssi_item_num) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], STT.EntityType.TERRITORY, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -1144,17 +1178,17 @@ def test_section2_aabd_ssi_validator_fail_territory_ssi(self, small_correct_file line_number += 1 t5s = [ - 
T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=1, REC_AID_TOTALLY_DISABLED=1, REC_SSI=1 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=2, REC_AID_TOTALLY_DISABLED=1, @@ -1173,47 +1207,49 @@ def test_section2_aabd_ssi_validator_fail_territory_ssi(self, small_correct_file assert num_errors == 2 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[0].error_message == ( - f'{t5_model_name} People in territories must have value = 2:No for REC_SSI.' + f"{t5_model_name} People in territories must have value = 2:No for Item {rec_ssi_item_num} " + "(Received Disability Benefits: SSI)." ) assert errors[1].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[1].error_message == ( - f'{t5_model_name} People in territories must have value = 2:No for REC_SSI.' + f"{t5_model_name} People in territories must have value = 2:No for Item {rec_ssi_item_num} " + "(Received Disability Benefits: SSI)." 
) @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5", "19E"), ), ( {"type": "C", "program_type": "Tribal TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5", "19E"), ), ( {"type": "C", "program_type": "SSP", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5", "18E"), ), ]) @pytest.mark.django_db - def test_section2_aabd_ssi_validator_fail_state_ssi(self, small_correct_file, header, T4Stuff, T5Stuff): + def test_section2_atd_ssi_validator_fail_state_ssi(self, small_correct_file, header, T4Stuff, T5Stuff): """Test records are related validator section 2 success case.""" (T4Factory, t4_schema, t4_model_name) = T4Stuff - (T5Factory, t5_schema, t5_model_name) = T5Stuff + (T5Factory, t5_schema, t5_model_name, rec_ssi_item_num) = T5Stuff case_consistency_validator = CaseConsistencyValidator( header, - header['program_type'], + header["program_type"], STT.EntityType.STATE, util.make_generate_case_consistency_parser_error(small_correct_file) ) t4s = [ - T4Factory.create( + T4Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", ), ] line_number = 1 @@ -1222,21 +1258,21 @@ def 
test_section2_aabd_ssi_validator_fail_state_ssi(self, small_correct_file, he line_number += 1 t5s = [ - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=1, REC_AID_TOTALLY_DISABLED=2, - REC_SSI=2 + REC_SSI=0 ), - T5Factory.create( + T5Factory.build( RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", DATE_OF_BIRTH="19970209", FAMILY_AFFILIATION=2, # validator only applies to fam_affil = 1; won't generate error REC_AID_TOTALLY_DISABLED=2, - REC_SSI=2 + REC_SSI=0 ), ] for t5 in t5s: @@ -1251,29 +1287,30 @@ def test_section2_aabd_ssi_validator_fail_state_ssi(self, small_correct_file, he assert num_errors == 1 assert errors[0].error_type == ParserErrorCategoryChoices.CASE_CONSISTENCY assert errors[0].error_message == ( - f'{t5_model_name} People in states must have a valid value for REC_SSI.' + f"{t5_model_name} People in states must have a valid value for Item {rec_ssi_item_num} " + "(Received Disability Benefits: SSI)." 
) @pytest.mark.parametrize("header,T1Stuff,T2Stuff,T3Stuff,stt_type", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], 'T1'), - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], "T1"), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2"), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], 'T1'), - (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], 'T2'), - (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], 'T3'), + (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], "T1"), + (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], "T2"), + (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], 'M1'), - (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], 'M2'), - (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], 'M3'), + (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], "M1"), + (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], "M2"), + (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], "M3"), STT.EntityType.STATE, ) ]) @@ -1291,28 +1328,28 @@ def test_section1_duplicate_records(self, small_correct_file, header, T1Stuff, T util.make_generate_case_consistency_parser_error(small_correct_file) ) - t1 = T1Factory.create(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t1 = T1Factory.build(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") line_number = 1 
case_consistency_validator.add_record(t1, t1_schema, str(t1), line_number, False) line_number += 1 - t2 = T2Factory.create(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t2 = T2Factory.build(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") case_consistency_validator.add_record(t2, t2_schema, str(t2), line_number, False) line_number += 1 t3s = [ - T3Factory.create( + T3Factory.build( RecordType="T3", RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", SSN="111111111", DATE_OF_BIRTH="22222222" ), - T3Factory.create( + T3Factory.build( RecordType="T3", RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", SSN="111111111", DATE_OF_BIRTH="22222222" ), @@ -1321,20 +1358,20 @@ def test_section1_duplicate_records(self, small_correct_file, header, T1Stuff, T for t3 in t3s: case_consistency_validator.add_record(t3, t3_schema, str(t3), line_number, False) - t1_dup = T1Factory.create(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t1_dup = T1Factory.build(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") line_number += 1 has_errors, _, _ = case_consistency_validator.add_record(t1_dup, t1_schema, str(t1), line_number, False) line_number += 1 assert has_errors - t2_dup = T2Factory.create(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t2_dup = T2Factory.build(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") has_errors, _, _ = case_consistency_validator.add_record(t2_dup, t2_schema, str(t2), line_number, False) line_number += 1 assert has_errors - t3_dup = T3Factory.create(RecordType="T3", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t3_dup = 
T3Factory.build(RecordType="T3", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") has_errors, _, _ = case_consistency_validator.add_record(t3_dup, t3_schema, str(t3s[0]), line_number, False) line_number += 1 assert has_errors @@ -1349,23 +1386,23 @@ def test_section1_duplicate_records(self, small_correct_file, header, T1Stuff, T @pytest.mark.parametrize("header,T1Stuff,T2Stuff,T3Stuff,stt_type", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], 'T1'), - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT1Factory, schema_defs.tanf.t1.schemas[0], "T1"), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2"), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], 'T1'), - (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], 'T2'), - (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], 'T3'), + (factories.TribalTanfT1Factory, schema_defs.tribal_tanf.t1.schemas[0], "T1"), + (factories.TribalTanfT2Factory, schema_defs.tribal_tanf.t2.schemas[0], "T2"), + (factories.TribalTanfT3Factory, schema_defs.tribal_tanf.t3.schemas[0], "T3"), STT.EntityType.STATE, ), ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], 'M1'), - (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], 'M2'), - (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], 'M3'), + (factories.SSPM1Factory, schema_defs.ssp.m1.schemas[0], "M1"), + (factories.SSPM2Factory, schema_defs.ssp.m2.schemas[0], "M2"), + (factories.SSPM3Factory, schema_defs.ssp.m3.schemas[0], "M3"), 
STT.EntityType.STATE, ), ]) @@ -1384,28 +1421,28 @@ def test_section1_partial_duplicate_records_and_precedence(self, small_correct_f util.make_generate_case_consistency_parser_error(small_correct_file) ) - t1 = T1Factory.create(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t1 = T1Factory.build(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") line_number = 1 case_consistency_validator.add_record(t1, t1_schema, str(t1), line_number, False) line_number += 1 - t2 = T2Factory.create(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t2 = T2Factory.build(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") case_consistency_validator.add_record(t2, t2_schema, str(t2), line_number, False) line_number += 1 t3s = [ - T3Factory.create( + T3Factory.build( RecordType="T3", RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", SSN="111111111", DATE_OF_BIRTH="22222222" ), - T3Factory.create( + T3Factory.build( RecordType="T3", RPT_MONTH_YEAR=202010, - CASE_NUMBER='123', + CASE_NUMBER="123", SSN="111111111", DATE_OF_BIRTH="22222222" ), @@ -1415,20 +1452,20 @@ def test_section1_partial_duplicate_records_and_precedence(self, small_correct_f case_consistency_validator.add_record(t3, t3_schema, str(t3), line_number, False) # Introduce partial dups - t1_dup = T1Factory.create(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t1_dup = T1Factory.build(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") line_number += 1 has_errors, _, _ = case_consistency_validator.add_record(t1_dup, t1_schema, str(t1_dup), line_number, False) line_number += 1 assert has_errors - t2_dup = T2Factory.create(RecordType="T2", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t2_dup = T2Factory.build(RecordType="T2", RPT_MONTH_YEAR=202010, 
CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") has_errors, _, _ = case_consistency_validator.add_record(t2_dup, t2_schema, str(t2_dup), line_number, False) line_number += 1 assert has_errors - t3_dup = T3Factory.create(RecordType="T3", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t3_dup = T3Factory.build(RecordType="T3", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") has_errors, _, _ = case_consistency_validator.add_record(t3_dup, t3_schema, str(t3_dup), line_number, False) line_number += 1 assert has_errors @@ -1444,7 +1481,7 @@ def test_section1_partial_duplicate_records_and_precedence(self, small_correct_f # are automatically replaced with the errors of higher precedence. case_consistency_validator.clear_errors(clear_dup=False) - t1_complete_dup = T1Factory.create(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t1_complete_dup = T1Factory.build(RecordType="T1", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") has_errors, _, _ = case_consistency_validator.add_record(t1_complete_dup, t1_schema, str(t1), line_number, False) @@ -1458,18 +1495,18 @@ def test_section1_partial_duplicate_records_and_precedence(self, small_correct_f @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, 
schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), ), ]) @pytest.mark.django_db @@ -1486,21 +1523,21 @@ def test_section2_duplicate_records(self, small_correct_file, header, T4Stuff, T ) line_number = 1 - t4 = T4Factory.create(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t4 = T4Factory.build(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") case_consistency_validator.add_record(t4, t4_schema, str(t4), line_number, False) line_number += 1 - t5 = T5Factory.create(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t5 = T5Factory.build(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") case_consistency_validator.add_record(t5, t5_schema, str(t5), line_number, False) line_number += 1 - t4_dup = T4Factory.create(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t4_dup = T4Factory.build(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") case_consistency_validator.add_record(t4_dup, t4_schema, str(t4), line_number, False) line_number += 1 - t5_dup = T5Factory.create(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t5_dup = T5Factory.build(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") case_consistency_validator.add_record(t5_dup, t5_schema, str(t5), line_number, False) line_number += 1 @@ -1514,18 +1551,18 @@ def 
test_section2_duplicate_records(self, small_correct_file, header, T4Stuff, T @pytest.mark.parametrize("header,T4Stuff,T5Stuff", [ ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], 'T4'), - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT4Factory, schema_defs.tanf.t4.schemas[0], "T4"), + (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], 'T4'), - (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], 'T5'), + (factories.TribalTanfT4Factory, schema_defs.tribal_tanf.t4.schemas[0], "T4"), + (factories.TribalTanfT5Factory, schema_defs.tribal_tanf.t5.schemas[0], "T5"), ), ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], 'M4'), - (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], 'M5'), + (factories.SSPM4Factory, schema_defs.ssp.m4.schemas[0], "M4"), + (factories.SSPM5Factory, schema_defs.ssp.m5.schemas[0], "M5"), ), ]) @pytest.mark.django_db @@ -1542,21 +1579,21 @@ def test_section2_partial_duplicate_records_and_precedence(self, small_correct_f ) line_number = 1 - t4 = T4Factory.create(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t4 = T4Factory.build(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") case_consistency_validator.add_record(t4, t4_schema, str(t4), line_number, False) line_number += 1 - t5 = T5Factory.create(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t5 = T5Factory.build(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") case_consistency_validator.add_record(t5, t5_schema, str(t5), line_number, False) 
line_number += 1 - t4_dup = T4Factory.create(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t4_dup = T4Factory.build(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") case_consistency_validator.add_record(t4_dup, t4_schema, str(t4_dup), line_number, False) line_number += 1 - t5_dup = T5Factory.create(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER='123', FAMILY_AFFILIATION=1, - SSN="111111111", DATE_OF_BIRTH="22222222") + t5_dup = T5Factory.build(RecordType="T5", RPT_MONTH_YEAR=202010, CASE_NUMBER="123", FAMILY_AFFILIATION=1, + SSN="111111111", DATE_OF_BIRTH="22222222") case_consistency_validator.add_record(t5_dup, t5_schema, str(t5_dup), line_number, False) line_number += 1 @@ -1571,7 +1608,7 @@ def test_section2_partial_duplicate_records_and_precedence(self, small_correct_f # are automatically replaced with the errors of higher precedence. case_consistency_validator.clear_errors(clear_dup=False) - t4_complete_dup = T4Factory.create(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + t4_complete_dup = T4Factory.build(RecordType="T4", RPT_MONTH_YEAR=202010, CASE_NUMBER="123") has_errors, _, _ = case_consistency_validator.add_record(t4_complete_dup, t4_schema, str(t4), line_number, False) @@ -1585,15 +1622,15 @@ def test_section2_partial_duplicate_records_and_precedence(self, small_correct_f @pytest.mark.parametrize("header,record_stuff", [ ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], 'T2'), + (factories.TanfT2Factory, schema_defs.tanf.t2.schemas[0], "T2"), ), ( {"type": "A", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], 'T3'), + (factories.TanfT3Factory, schema_defs.tanf.t3.schemas[0], "T3"), ), ( {"type": "C", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT5Factory, schema_defs.tanf.t5.schemas[0], 'T5'), + (factories.TanfT5Factory, 
schema_defs.tanf.t5.schemas[0], "T5"), ), ]) @pytest.mark.django_db @@ -1609,12 +1646,12 @@ def test_family_affiliation_negate_partial_duplicate(self, small_correct_file, h ) line_number = 1 - first_record = Factory.create(RecordType=model_name, RPT_MONTH_YEAR=202010, CASE_NUMBER='123') + first_record = Factory.build(RecordType=model_name, RPT_MONTH_YEAR=202010, CASE_NUMBER="123") case_consistency_validator.add_record(first_record, schema, str(first_record), line_number, False) line_number += 1 - second_record = Factory.create(RecordType=model_name, RPT_MONTH_YEAR=202010, CASE_NUMBER='123', - FAMILY_AFFILIATION=5) + second_record = Factory.build(RecordType=model_name, RPT_MONTH_YEAR=202010, CASE_NUMBER="123", + FAMILY_AFFILIATION=5) case_consistency_validator.add_record(second_record, schema, str(second_record), line_number, False) line_number += 1 @@ -1624,27 +1661,27 @@ def test_family_affiliation_negate_partial_duplicate(self, small_correct_file, h @pytest.mark.parametrize("header,record_stuff", [ ( {"type": "G", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT6Factory, schema_defs.tanf.t6.schemas[0], 'T6'), + (factories.TanfT6Factory, schema_defs.tanf.t6.schemas[0], "T6"), ), ( {"type": "G", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT6Factory, schema_defs.tribal_tanf.t6.schemas[0], 'T6'), + (factories.TribalTanfT6Factory, schema_defs.tribal_tanf.t6.schemas[0], "T6"), ), ( {"type": "G", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.SSPM6Factory, schema_defs.ssp.m6.schemas[0], 'M6'), + (factories.SSPM6Factory, schema_defs.ssp.m6.schemas[0], "M6"), ), ( {"type": "S", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TanfT7Factory, schema_defs.tanf.t7.schemas[0], 'T7'), + (factories.TanfT7Factory, schema_defs.tanf.t7.schemas[0], "T7"), ), ( {"type": "S", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.TribalTanfT7Factory, 
schema_defs.tribal_tanf.t7.schemas[0], 'T7'), + (factories.TribalTanfT7Factory, schema_defs.tribal_tanf.t7.schemas[0], "T7"), ), ( {"type": "S", "program_type": "TAN", "year": 2020, "quarter": "4"}, - (factories.SSPM7Factory, schema_defs.ssp.m7.schemas[0], 'M7'), + (factories.SSPM7Factory, schema_defs.ssp.m7.schemas[0], "M7"), ), ]) @pytest.mark.django_db @@ -1664,7 +1701,7 @@ def test_s3_s4_duplicates(self, small_correct_file, header, record_stuff): # the file. If the line number was changing, we would be flagging duplicate errors. first_record = None for i in range(5): - record = Factory.create(RecordType=model_name, RPT_MONTH_YEAR=202010) + record = Factory.build(RecordType=model_name, RPT_MONTH_YEAR=202010) if i == 0: first_record = record case_consistency_validator.add_record(record, schema, str(record), line_number, False) @@ -1673,7 +1710,7 @@ def test_s3_s4_duplicates(self, small_correct_file, header, record_stuff): errors = case_consistency_validator.get_generated_errors() assert len(errors) == 0 - second_record = Factory.create(RecordType=model_name, RPT_MONTH_YEAR=202010) + second_record = Factory.build(RecordType=model_name, RPT_MONTH_YEAR=202010) case_consistency_validator.add_record(second_record, schema, str(first_record), line_number, False) errors = case_consistency_validator.get_generated_errors() diff --git a/tdrs-backend/tdpservice/parsers/test/test_header.py b/tdrs-backend/tdpservice/parsers/test/test_header.py index 18079bc68..ceca0a35e 100644 --- a/tdrs-backend/tdpservice/parsers/test/test_header.py +++ b/tdrs-backend/tdpservice/parsers/test/test_header.py @@ -59,8 +59,8 @@ def test_header_cleanup(test_datafile): # Encryption error ("HEADER20204A06 TAN1AD", False, "HEADER Item 9 (encryption): A is not in [ , E]."), # Update error - ("HEADER20204A06 TAN1EA", False, ("HEADER Update Indicator must be set to D instead of A. 
Please review " - "Exporting Complete Data Using FTANF in the Knowledge Center.")), + ("HEADER20204A06 TAN1EA", True, ("HEADER Update Indicator must be set to D instead of A. Please review " + "Exporting Complete Data Using FTANF in the Knowledge Center.")), ]) @pytest.mark.django_db def test_header_fields(test_datafile, header_line, is_valid, error): diff --git a/tdrs-backend/tdpservice/parsers/test/test_parse.py b/tdrs-backend/tdpservice/parsers/test/test_parse.py index fd24bb8fe..71dd6fc10 100644 --- a/tdrs-backend/tdpservice/parsers/test/test_parse.py +++ b/tdrs-backend/tdpservice/parsers/test/test_parse.py @@ -12,9 +12,7 @@ from tdpservice.search_indexes.models.tribal import Tribal_TANF_T5, Tribal_TANF_T6, Tribal_TANF_T7 from tdpservice.search_indexes.models.ssp import SSP_M1, SSP_M2, SSP_M3, SSP_M4, SSP_M5, SSP_M6, SSP_M7 from tdpservice.search_indexes import documents -from tdpservice.parsers.test.factories import DataFileSummaryFactory, ParsingFileFactory from tdpservice.data_files.models import DataFile -from tdpservice.parsers import util from .. 
import schema_defs, aggregates from elasticsearch.helpers.errors import BulkIndexError import logging @@ -26,38 +24,6 @@ settings.GENERATE_TRAILER_ERRORS = True -@pytest.fixture -def test_datafile(stt_user, stt): - """Fixture for small_correct_file.""" - return util.create_test_datafile('small_correct_file.txt', stt_user, stt) - - -@pytest.fixture -def test_header_datafile(stt_user, stt): - """Fixture for header test.""" - return util.create_test_datafile('tanf_section1_header_test.txt', stt_user, stt) - - -@pytest.fixture -def dfs(): - """Fixture for DataFileSummary.""" - return DataFileSummaryFactory.build() - - -@pytest.fixture -def t2_invalid_dob_file(): - """Fixture for T2 file with an invalid DOB.""" - parsing_file = ParsingFileFactory( - year=2021, - quarter='Q1', - file__name='t2_invalid_dob_file.txt', - file__section='Active Case Data', - file__data=(b'HEADER20204A25 TAN1ED\n' - b'T22020101111111111212Q897$9 3WTTTTTY@W222122222222101221211001472201140000000000000000000000000' - b'0000000000000000000000000000000000000000000000000000000000291\n' - b'TRAILER0000001 ') - ) - return parsing_file # TODO: the name of this test doesn't make perfect sense anymore since it will always have errors now. 
# TODO: parametrize and merge with test_zero_filled_fips_code_file @@ -171,6 +137,7 @@ def test_parse_big_file(big_file, dfs): parse.parse_datafile(big_file, dfs) dfs.status = dfs.get_status() assert dfs.status == DataFileSummary.Status.ACCEPTED_WITH_ERRORS + dfs.case_aggregates = aggregates.case_aggregates_by_month( dfs.datafile, dfs.status) assert dfs.case_aggregates == {'months': [ @@ -351,6 +318,7 @@ def test_parse_bad_trailer_file2(bad_trailer_file_2, dfs): parser_errors = ParserError.objects.filter(file=bad_trailer_file_2) assert parser_errors.count() == 9 + parser_errors = parser_errors.exclude(error_type=ParserErrorCategoryChoices.CASE_CONSISTENCY) trailer_errors = list(parser_errors.filter(row_number=3).order_by('id')) trailer_error_1 = trailer_errors[0] @@ -369,8 +337,7 @@ def test_parse_bad_trailer_file2(bad_trailer_file_2, dfs): row_2_error = row_2_errors.first() assert row_2_error.error_type == ParserErrorCategoryChoices.FIELD_VALUE assert row_2_error.error_message == ( - 'T1 Item 13 (Receives Subsidized Housing): 3 is not ' - 'larger or equal to 1 and smaller or equal to 2.' + 'T1 Item 13 (Receives Subsidized Housing): 3 is not in range [1, 2].' ) # catch-rpt-month-year-mismatches @@ -511,23 +478,23 @@ def test_parse_ssp_section1_datafile(ssp_section1_datafile, dfs): parse.parse_datafile(ssp_section1_datafile, dfs) - parser_errors = ParserError.objects.filter(file=ssp_section1_datafile) + parser_errors = ParserError.objects.filter(file=ssp_section1_datafile).order_by('row_number') err = parser_errors.first() assert err.row_number == 2 assert err.error_type == ParserErrorCategoryChoices.FIELD_VALUE assert err.error_message == ( - 'M1 Item 11 (Receives Subsidized Housing): 3 is not larger or equal to 1 and smaller or equal to 2.' + 'M1 Item 11 (Receives Subsidized Housing): 3 is not in range [1, 2].' 
) assert err.content_type is not None assert err.object_id is not None - dup_errors = parser_errors.filter(error_type=ParserErrorCategoryChoices.CASE_CONSISTENCY).order_by("id") - assert dup_errors.count() == 2 - assert dup_errors[0].error_message == "Duplicate record detected with record type M3 at line 453. " + \ + cat4_errors = parser_errors.filter(error_type=ParserErrorCategoryChoices.CASE_CONSISTENCY).order_by("id") + assert cat4_errors.count() == 2 + assert cat4_errors[0].error_message == "Duplicate record detected with record type M3 at line 453. " + \ "Record is a duplicate of the record at line number 452." - assert dup_errors[1].error_message == "Duplicate record detected with record type M3 at line 3273. " + \ + assert cat4_errors[1].error_message == "Duplicate record detected with record type M3 at line 3273. " + \ "Record is a duplicate of the record at line number 3272." assert parser_errors.count() == 32488 @@ -918,8 +885,7 @@ def test_parse_tanf_section2_file(tanf_section2_file, dfs): err = parser_errors.first() assert err.error_type == ParserErrorCategoryChoices.FIELD_VALUE assert err.error_message == ( - "T4 Item 10 (Received Subsidized Housing): 3 " - "is not larger or equal to 1 and smaller or equal to 2." + "T4 Item 10 (Received Subsidized Housing): 3 is not in range [1, 2]." ) assert err.content_type.model == "tanf_t4" assert err.object_id is not None @@ -1089,6 +1055,7 @@ def test_parse_ssp_section4_file(ssp_section4_file, dfs): dfs.status = dfs.get_status() dfs.case_aggregates = aggregates.total_errors_by_month( dfs.datafile, dfs.status) + assert dfs.case_aggregates == {"months": [ {"month": "Oct", "total_errors": 0}, {"month": "Nov", "total_errors": 0}, @@ -1452,8 +1419,7 @@ def test_empty_t4_t5_values(t4_t5_empty_values, dfs): logger.info(t4[0].__dict__) assert t5.count() == 1 assert parser_errors[0].error_message == ( - "T4 Item 10 (Received Subsidized Housing): 3 is " - "not larger or equal to 1 and smaller or equal to 2." 
+ "T4 Item 10 (Received Subsidized Housing): 3 is not in range [1, 2]." ) @@ -1504,7 +1470,7 @@ def test_bulk_create_returns_rollback_response_on_bulk_index_exception(small_cor assert LogEntry.objects.all().count() == 1 log = LogEntry.objects.get() - assert log.change_message == "Encountered error while indexing datafile documents: indexing exception" + assert log.change_message == "Encountered error while indexing datafile documents: \nindexing exception" assert all_created is True assert TANF_T1.objects.all().count() == 1 @@ -1610,16 +1576,12 @@ def test_parse_tanf_section_1_file_with_bad_update_indicator(tanf_section_1_file parser_errors = ParserError.objects.filter(file=tanf_section_1_file_with_bad_update_indicator) - assert parser_errors.count() == 1 - - error = parser_errors.first() + assert parser_errors.count() == 5 - assert error.error_type == ParserErrorCategoryChoices.FIELD_VALUE - assert error.error_message == ("HEADER Update Indicator must be set to D " - "instead of U. Please review " - "Exporting Complete Data Using FTANF in the " - "Knowledge Center.") + error_messages = [error.error_message for error in parser_errors] + assert "HEADER Update Indicator must be set to D instead of U. Please review" + \ + " Exporting Complete Data Using FTANF in the Knowledge Center." in error_messages @pytest.mark.django_db() def test_parse_tribal_section_4_bad_quarter(tribal_section_4_bad_quarter, dfs): @@ -1677,8 +1639,7 @@ def test_parse_m2_cat2_invalid_37_38_39_file(m2_cat2_invalid_37_38_39_file, dfs) assert parser_errors.count() == 3 error_msgs = { - "Item 37 (Educational Level) 00 is not in range [1, 16]. or " - "Item 37 (Educational Level) 00 is not in range [98, 99].", + "Item 37 (Educational Level) 00 must be between 1 and 16 or must be between 98 and 99.", "M2 Item 38 (Citizenship/Immigration Status): 0 is not in [1, 2, 3, 9].", "M2 Item 39 (Cooperated with Child Support): 0 is not in [1, 2, 9]." 
} @@ -1702,12 +1663,12 @@ def test_parse_m3_cat2_invalid_68_69_file(m3_cat2_invalid_68_69_file, dfs): assert parser_errors.count() == 4 - error_msgs = {"Item 68 (Educational Level) 00 is not in range [1, 16]. or Item 68 (Educational Level) " + - "00 is not in range [98, 99].", - "M3 Item 69 (Citizenship/Immigration Status): 0 is not in [1, 2, 3, 9].", - "Item 68 (Educational Level) 00 is not in range [1, 16]. or Item 68 (Educational Level) " + - "00 is not in range [98, 99].", - "M3 Item 69 (Citizenship/Immigration Status): 0 is not in [1, 2, 3, 9]."} + error_msgs = { + "Item 68 (Educational Level) 00 must be between 1 and 16 or must be between 98 and 99.", + "M3 Item 69 (Citizenship/Immigration Status): 0 is not in [1, 2, 3, 9].", + "Item 68 (Educational Level) 00 must be between 1 and 16 or must be between 98 and 99.", + "M3 Item 69 (Citizenship/Immigration Status): 0 is not in [1, 2, 3, 9]." + } for e in parser_errors: assert e.error_message in error_msgs @@ -1735,31 +1696,6 @@ def test_parse_m5_cat2_invalid_23_24_file(m5_cat2_invalid_23_24_file, dfs): for e in parser_errors: assert e.error_message in error_msgs - -@pytest.fixture -def test_file_zero_filled_fips_code(): - """Fixture for T1 file with an invalid CITIZENSHIP_STATUS.""" - parsing_file = ParsingFileFactory( - year=2021, - quarter='Q1', - file__name='t3_invalid_citizenship_file.txt', - file__section='Active Case Data', - file__data=(b'HEADER20241A01000TAN2ED\n' - b'T1202401 2132333 0140951112 43312 03 0 0 2 554145' + - b' 0 0 0 0 0 0 0 0 0 0222222 0 02229 22 \n' + - b'T2202401 21323333219550117WT@TB9BT92122222222223 1329911 34' + - b' 32 699 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' + - b' 0 0 0 0 0 01623 0 0 0\n' + - b'T2202401 21323333219561102WTT@WBP992122221222222 2329911 28' + - b' 32 699 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' + - b' 0 0 0 0 0 01432 0 0 0\n' + - b'T3202401 2132333120070906WT@@#ZY@W212222122 63981 0 012' + - b'0050201WTTYT#TT0212222122 63981 0 0 \n' + 
- b'TRAILER 4 ') - ) - return parsing_file - - @pytest.mark.django_db() def test_zero_filled_fips_code_file(test_file_zero_filled_fips_code, dfs): """Test parsing a file with zero filled FIPS code.""" @@ -1853,7 +1789,7 @@ def test_parse_partial_duplicate(file, batch_size, model, record_type, num_error assert parser_errors.count() == num_errors dup_error = parser_errors.first() - assert dup_error.error_message == expected_error_msg.format(record_type=record_type) + assert expected_error_msg.format(record_type=record_type) in dup_error.error_message model.objects.count() == 0 @@ -1882,4 +1818,4 @@ def test_parse_cat_4_edge_case_file(cat4_edge_case_file, dfs): err = parser_errors.first() assert err.error_message == ("Every T1 record should have at least one corresponding T2 or T3 record with the " - "same RPT_MONTH_YEAR and CASE_NUMBER.") + "same Item 4 (Reporting Year and Month) and Item 6 (Case Number).") diff --git a/tdrs-backend/tdpservice/parsers/test/test_util.py b/tdrs-backend/tdpservice/parsers/test/test_util.py index 4d379e741..811395d88 100644 --- a/tdrs-backend/tdpservice/parsers/test/test_util.py +++ b/tdrs-backend/tdpservice/parsers/test/test_util.py @@ -14,12 +14,12 @@ def passing_validator(): """Fake validator that always returns valid.""" - return lambda _, __, ___, ____: (True, None) + return lambda _, __: (True, None) def failing_validator(): """Fake validator that always returns invalid.""" - return lambda _, __, ___, ____: (False, 'Value is not valid.') + return lambda _, __: (False, 'Value is not valid.') def passing_postparsing_validator(): """Fake validator that always returns valid.""" diff --git a/tdrs-backend/tdpservice/parsers/test/test_validators.py b/tdrs-backend/tdpservice/parsers/test/test_validators.py deleted file mode 100644 index d729efc6e..000000000 --- a/tdrs-backend/tdpservice/parsers/test/test_validators.py +++ /dev/null @@ -1,2126 +0,0 @@ -"""Tests for generic validator functions.""" - -import pytest -import logging -from 
datetime import date -from .. import validators -from ..row_schema import RowSchema -from ..fields import Field -from tdpservice.parsers.test.factories import TanfT1Factory, TanfT2Factory, TanfT3Factory, TanfT5Factory, TanfT6Factory - -from tdpservice.parsers.test.factories import SSPM5Factory - -logger = logging.getLogger(__name__) - -@pytest.mark.parametrize("value,length", [ - (None, 0), - (None, 10), - (' ', 5), - ('###', 3), - ('', 0), - ('', 10), -]) -def test_value_is_empty_returns_true(value, length): - """Test value_is_empty returns valid.""" - result = validators.value_is_empty(value, length) - assert result is True - - -@pytest.mark.parametrize("value,length", [ - (0, 1), - (1, 1), - (10, 2), - ('0', 1), - ('0000', 4), - ('1 ', 5), - ('##3', 3), -]) -def test_value_is_empty_returns_false(value, length): - """Test value_is_empty returns invalid.""" - result = validators.value_is_empty(value, length) - assert result is False - - -def test_or_validators(): - """Test `or_validators` gives a valid result.""" - value = "2" - validator = validators.or_validators(validators.matches(("2")), validators.matches(("3"))) - assert validator(value, RowSchema(), "friendly_name", "item_no", 'inline') == (True, None) - assert validator("3", RowSchema(), "friendly_name", "item_no", 'inline') == (True, None) - assert validator("5", RowSchema(), "friendly_name", "item_no", 'inline') == ( - False, - "Item item_no (friendly_name) 5 does not match 2. or " - "Item item_no (friendly_name) 5 does not match 3." 
- ) - - validator = validators.or_validators(validators.matches(("2")), validators.matches(("3")), - validators.matches(("4"))) - assert validator(value, RowSchema(), "friendly_name", "item_no", 'inline') == (True, None) - - value = "3" - assert validator(value, RowSchema(), "friendly_name", "item_no", 'inline') == (True, None) - - value = "4" - assert validator(value, RowSchema(), "friendly_name", "item_no", 'inline') == (True, None) - - value = "5" - assert validator(value, RowSchema(), "friendly_name", "item_no", 'inline') == ( - False, - "Item item_no (friendly_name) 5 does not match 2. or " - "Item item_no (friendly_name) 5 does not match 3. or " - "Item item_no (friendly_name) 5 does not match 4." - ) - - validator = validators.or_validators(validators.matches((2)), validators.matches((3)), validators.isLargerThan(4)) - assert validator(5, RowSchema(), "friendly_name", "item_no", 'inline') == (True, None) - assert validator(1, RowSchema(), "friendly_name", "item_no", 'inline') == ( - False, - "Item item_no (friendly_name) 1 does not match 2. or " - "Item item_no (friendly_name) 1 does not match 3. or " - "Item item_no (friendly_name) 1 is not larger than 4." 
- ) - -def test_if_validators(): - """Test `if_then_validator` gives a valid result.""" - value = {"Field1": "1", "Field2": "2"} - validator = validators.if_then_validator( - condition_field_name="Field1", condition_function=validators.matches('1'), - result_field_name="Field2", result_function=validators.matches('2'), - ) - assert validator(value, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='Field1', friendly_name='field 1'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='Field2', friendly_name='field 2'), - ] - )) == (True, None, ['Field1', 'Field2']) - - validator = validator = validators.if_then_validator( - condition_field_name="Field1", condition_function=validators.matches('1'), - result_field_name="Field2", result_function=validators.matches('1'), - ) - result = validator(value, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='Field1', friendly_name='field 1'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='Field2', friendly_name='field 2'), - ] - )) - assert result == (False, 'if Field1 :1 validator1 passed then Item 2 (field 2) 2 does not match 1.', - ['Field1', 'Field2']) - - -def test_and_validators(): - """Test `and_validators` gives a valid result.""" - validator = validators.and_validators(validators.isLargerThan(2), validators.isLargerThan(0)) - assert validator(1, RowSchema(), "friendly_name", "item_no") == ( - False, - 'Item item_no (friendly_name) 1 is not larger than 2.' 
- ) - assert validator(3, RowSchema(), "friendly_name", "item_no") == (True, None) - - -def test_validate__FAM_AFF__SSN(): - """Test `validate__FAM_AFF__SSN` gives a valid result.""" - instance = { - 'FAMILY_AFFILIATION': 2, - 'CITIZENSHIP_STATUS': 1, - 'SSN': '0'*9, - } - result = validators.validate__FAM_AFF__SSN()(instance, RowSchema()) - assert result == (False, - 'T1: If FAMILY_AFFILIATION ==2 and CITIZENSHIP_STATUS==1 or 2, ' + - 'then SSN != 000000000 -- 999999999.', - ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS', 'SSN']) - instance['SSN'] = '1'*8 + '0' - result = validators.validate__FAM_AFF__SSN()(instance, RowSchema()) - assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS', 'SSN']) - -@pytest.mark.parametrize( - "value, valid", - [ - ("20201", True), - ("20202", True), - ("20203", True), - ("20204", True), - ("20200", False), - ("20205", False), - ("2020 ", False), - ("2020A", False) - ]) -def test_quarterIsValid(value, valid): - """Test `quarterIsValid`.""" - val = validators.quarterIsValid() - result = val(value, RowSchema(), "friendly_name", "item_no", None) - - errorText = None if valid else f"T1 Item item_no (friendly_name): {value[-1:]} is not a valid quarter." - errorText = None if valid else f"T1 Item item_no (friendly_name): {value[-1:]} is not a valid quarter." 
- assert result == (valid, errorText) - -def test_validateSSN(): - """Test `validateSSN`.""" - value = "123456789" - val = validators.validateSSN() - result = val(value) - assert result == (True, None) - - value = "111111111" - options = [str(i) * 9 for i in range(0, 10)] - result = val(value, RowSchema(), "friendly_name", "item_no", None) - assert result == (False, f"T1 Item item_no (friendly_name): {value} is in {options}.") - -def test_validateRace(): - """Test `validateRace`.""" - value = 1 - val = validators.validateRace() - result = val(value) - assert result == (True, None) - - value = 3 - result = val(value, RowSchema(), "friendly_name", "item_no", None) - assert result == ( - False, - f"T1 Item item_no (friendly_name): {value} is not greater than or equal to 0 or smaller than or equal to 2." - ) - -def test_validateRptMonthYear(): - """Test `validateRptMonthYear`.""" - value = "T1202012" - val = validators.validateRptMonthYear() - result = val(value) - assert result == (True, None) - - value = "T1 " - result = val(value, RowSchema(), "friendly_name", "item_no", None) - assert result == ( - False, - f"T1 Item item_no (friendly_name): The value: {value[2:8]}, does not " - "follow the YYYYMM format for Reporting Year and Month." - ) - - value = "T1189912" - result = val(value, RowSchema(), "friendly_name", "item_no", None) - assert result == ( - False, - f"T1 Item item_no (friendly_name): The value: {value[2:8]}, does not follow " - "the YYYYMM format for Reporting Year and Month." - ) - - value = "T1202013" - result = val(value, RowSchema(), "friendly_name", "item_no", None) - assert result == ( - False, - f"T1 Item item_no (friendly_name): The value: {value[2:8]}, does " - "not follow the YYYYMM format for Reporting Year and Month." 
- ) - -def test_matches_returns_valid(): - """Test `matches` gives a valid result.""" - value = 'TEST' - - validator = validators.matches('TEST') - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_matches_returns_invalid(): - """Test `matches` gives an invalid result.""" - value = 'TEST' - - validator = validators.matches('test') - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): TEST does not match test.' - assert error == 'T1 Item item_no (friendly_name): TEST does not match test.' - - -def test_oneOf_returns_valid(): - """Test `oneOf` gives a valid result.""" - value = 17 - options = [17, 24, 36] - - validator = validators.oneOf(options) - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - value = 50 - options = ["17-55"] - - validator = validators.oneOf(options) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is True - assert error is None - - -def test_oneOf_returns_invalid(): - """Test `oneOf` gives an invalid result.""" - value = 64 - options = [17, 24, 36] - - validator = validators.oneOf(options) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 64 is not in [17, 24, 36].' - assert error == 'T1 Item item_no (friendly_name): 64 is not in [17, 24, 36].' - - value = 65 - options = ["17-55"] - - validator = validators.oneOf(options) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 65 is not in [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, ' \ - '29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55].' 
- - -def test_between_returns_valid(): - """Test `between` gives a valid result for integers.""" - value = 47 - - validator = validators.between(3, 400) - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_between_returns_valid_for_string_value(): - """Test `between` gives a valid result for strings.""" - value = '047' - - validator = validators.between(3, 400) - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_between_returns_invalid(): - """Test `between` gives an invalid result for integers.""" - value = 47 - - validator = validators.between(48, 400) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 47 is not between 48 and 400.' - - -@pytest.mark.parametrize('value, expected_is_valid, expected_error', [ - (7, True, None), - (77731, True, None), - ('7', True, None), - ('234897', True, None), - ('a', False, 'T1 Item item_no (friendly_name): a is not a number.'), - ( - 'houston, we have a problem', False, - 'T1 Item item_no (friendly_name): houston, we have a problem is not a number.' 
- ), - (' test', False, 'T1 Item item_no (friendly_name): test is not a number.'), - (' 7 ', True, None), - (' 8388323', True, None), - ('87932875 ', True, None), - (' 00 ', True, None), - (' 88 ', True, None), - (' 088 ', True, None), - (' 8 8 ', False, 'T1 Item item_no (friendly_name): 8 8 is not a number.'), -]) -def test_isNumber(value, expected_is_valid, expected_error): - """Test `isNumber` validator.""" - validator = validators.isNumber() - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - assert is_valid == expected_is_valid - assert error == expected_error - - -def test_date_month_is_valid_returns_valid(): - """Test `dateMonthIsValid` gives a valid result.""" - value = '20191027' - validator = validators.dateMonthIsValid() - is_valid, error = validator(value) - assert is_valid is True - assert error is None - - -def test_date_month_is_valid_returns_invalid(): - """Test `dateMonthIsValid` gives an invalid result.""" - value = '20191327' - validator = validators.dateMonthIsValid() - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 13 is not a valid month.' - assert error == 'T1 Item item_no (friendly_name): 13 is not a valid month.' - - -def test_date_day_is_valid_returns_valid(): - """Test `dateDayIsValid` gives a valid result.""" - value = '20191027' - validator = validators.dateDayIsValid() - is_valid, error = validator(value) - assert is_valid is True - assert error is None - - -def test_date_day_is_valid_returns_invalid(): - """Test `dateDayIsValid` gives an invalid result.""" - value = '20191132' - validator = validators.dateDayIsValid() - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 32 is not a valid day.' - assert error == 'T1 Item item_no (friendly_name): 32 is not a valid day.' 
- - -def test_olderThan(): - """Test `olderThan`.""" - min_age = 18 - value = 19830223 - validator = validators.olderThan(min_age) - assert validator(value) == (True, None) - - value = 20240101 - result = validator(value, RowSchema(), "friendly_name", "item_no", None) - assert result == ( - False, - f"T1 Item item_no (friendly_name): {str(value)[:4]} must be less than or equal to " - f"{date.today().year - min_age} to meet the minimum age requirement." - ) - - -def test_dateYearIsLargerThan(): - """Test `dateYearIsLargerThan`.""" - year = 1900 - value = 19830223 - validator = validators.dateYearIsLargerThan(year) - assert validator(value) == (True, None) - - value = 18990101 - assert validator(value, RowSchema(), "friendly_name", "item_no", None) == ( - False, - f"T1 Item item_no (friendly_name): Year {str(value)[:4]} must be larger than {year}." - ) - - -def test_between_returns_invalid_for_string_value(): - """Test `between` gives an invalid result for strings.""" - value = '047' - - validator = validators.between(100, 400) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 047 is not between 100 and 400.' - assert error == 'T1 Item item_no (friendly_name): 047 is not between 100 and 400.' - - -def test_recordHasLength_returns_valid(): - """Test `recordHasLength` gives a valid result.""" - value = 'abcd123' - - validator = validators.recordHasLength(7) - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_recordHasLength_returns_invalid(): - """Test `recordHasLength` gives an invalid result.""" - value = 'abcd123' - - validator = validators.recordHasLength(22) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1: record length is 7 characters but must be 22.' 
- assert error == 'T1: record length is 7 characters but must be 22.' - -def test_hasLengthGreaterThan_returns_valid(): - """Test `hasLengthGreaterThan` gives a valid result.""" - value = 'abcd123' - - validator = validators.hasLengthGreaterThan(6) - is_valid, error = validator(value, None, "friendly_name", "item_no", None) - - assert is_valid is True - assert error is None - -def test_hasLengthGreaterThan_returns_invalid(): - """Test `hasLengthGreaterThan` gives an invalid result.""" - value = 'abcd123' - - validator = validators.hasLengthGreaterThan(8) - is_valid, error = validator(value) - - assert is_valid is False - assert error == 'Value length 7 is not greater than 8.' - - -def test_recordHasLengthBetween_returns_valid(): - """Test `hasLengthBetween` gives a valid result.""" - value = 'abcd123' - lower = 0 - upper = 15 - - validator = validators.recordHasLengthBetween(lower, upper) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is True - assert error is None - - -def test_recordHasLengthBetween_returns_invalid(): - """Test `hasLengthBetween` gives an invalid result.""" - value = 'abcd123' - lower = 0 - upper = 1 - - validator = validators.recordHasLengthBetween(lower, upper) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == f"T1: record length of {len(value)} characters is not in the range [{lower}, {upper}]." - assert error == f"T1: record length of {len(value)} characters is not in the range [{lower}, {upper}]." 
- - -def test_intHasLength_returns_valid(): - """Test `intHasLength` gives a valid result.""" - value = '123' - - validator = validators.intHasLength(3) - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_intHasLength_returns_invalid(): - """Test `intHasLength` gives an invalid result.""" - value = '1a3' - - validator = validators.intHasLength(22) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 1a3 does not have exactly 22 digits.' - assert error == 'T1 Item item_no (friendly_name): 1a3 does not have exactly 22 digits.' - - -def test_contains_returns_valid(): - """Test `contains` gives a valid result.""" - value = '12345abcde' - - validator = validators.contains('2345') - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_contains_returns_invalid(): - """Test `contains` gives an invalid result.""" - value = '12345abcde' - - validator = validators.contains('6789') - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 12345abcde does not contain 6789.' - assert error == 'T1 Item item_no (friendly_name): 12345abcde does not contain 6789.' - - -def test_startsWith_returns_valid(): - """Test `startsWith` gives a valid result.""" - value = '12345abcde' - - validator = validators.startsWith('1234') - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_startsWith_returns_invalid(): - """Test `startsWith` gives an invalid result.""" - value = '12345abcde' - - validator = validators.startsWith('abc') - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): 12345abcde does not start with abc.' 
- assert error == 'T1 Item item_no (friendly_name): 12345abcde does not start with abc.' - - -def test_notEmpty_returns_valid_full_string(): - """Test `notEmpty` gives a valid result for a full string.""" - value = '1 ' - - validator = validators.notEmpty() - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_notEmpty_returns_invalid_full_string(): - """Test `notEmpty` gives an invalid result for a full string.""" - value = ' ' - - validator = validators.notEmpty() - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == 'T1 Item item_no (friendly_name): contains blanks between positions 0 and 9.' - assert error == 'T1 Item item_no (friendly_name): contains blanks between positions 0 and 9.' - - -def test_notEmpty_returns_valid_substring(): - """Test `notEmpty` gives a valid result for a partial string.""" - value = '11122333' - - validator = validators.notEmpty(start=3, end=5) - is_valid, error = validator(value) - - assert is_valid is True - assert error is None - - -def test_notEmpty_returns_invalid_substring(): - """Test `notEmpty` gives an invalid result for a partial string.""" - value = '111 333' - - validator = validators.notEmpty(start=3, end=5) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == "T1 Item item_no (friendly_name): 111 333 contains blanks between positions 3 and 5." - assert error == "T1 Item item_no (friendly_name): 111 333 contains blanks between positions 3 and 5." 
- - -def test_notEmpty_returns_nonexistent_substring(): - """Test `notEmpty` gives an invalid result for a nonexistent substring.""" - value = '111 333' - - validator = validators.notEmpty(start=10, end=12) - is_valid, error = validator(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error == "T1 Item item_no (friendly_name): 111 333 contains blanks between positions 10 and 12." - assert error == "T1 Item item_no (friendly_name): 111 333 contains blanks between positions 10 and 12." - - -@pytest.mark.parametrize("test_input", [1, 2, 3, 4]) -def test_quarterIsValid_returns_true_if_valid(test_input): - """Test `quarterIsValid` gives a valid result for values 1-4.""" - validator = validators.quarterIsValid() - is_valid, error = validator(test_input, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is True - assert error is None - - -@pytest.mark.parametrize("test_input", [" ", 0, 5, "A"]) -def test_quarterIsValid_returns_false_if_invalid(test_input): - """Test `quarterIsValid` gives an invalid result for values not 1-4.""" - validator = validators.quarterIsValid() - is_valid, error = validator(test_input, RowSchema(), "friendly_name", "item_no", 'prefix') - - assert is_valid is False - assert error == f"T1 Item item_no (friendly_name): {test_input} is not a valid quarter." - assert error == f"T1 Item item_no (friendly_name): {test_input} is not a valid quarter." - -@pytest.mark.parametrize("value", ["T72020 ", "T720194", "T720200", "T720207", "T72020$"]) -def test_calendarQuarterIsValid_returns_invalid(value): - """Test `calendarQuarterIsValid` returns false on invalid input.""" - val = validators.calendarQuarterIsValid(2, 7) - is_valid, error_msg = val(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is False - assert error_msg == ( - f"T1: {value[2:7]} is invalid. 
Calendar Quarter must be a numeric " - "representing the Calendar Year and Quarter formatted as YYYYQ" - ) - - -@pytest.mark.parametrize("value", ["T720201", "T720202", "T720203", "T720204"]) -def test_calendarQuarterIsValid_returns_valid(value): - """Test `calendarQuarterIsValid` returns false on invalid input.""" - val = validators.calendarQuarterIsValid(2, 7) - is_valid, error_msg = val(value, RowSchema(), "friendly_name", "item_no", None) - - assert is_valid is True - assert error_msg is None - -@pytest.mark.usefixtures('db') -class TestCat3ValidatorsBase: - """A base test class for tests that evaluate category three validators.""" - - @pytest.fixture - def record(self): - """Record instance that returns a valid Section 1 record. - - This fixture must be overridden in all child classes. - """ - raise NotImplementedError() - - -class TestT1Cat3Validators(TestCat3ValidatorsBase): - """Test category three validators for TANF T1 records.""" - - @pytest.fixture - def record(self): - """Override default record with TANF T1 record.""" - return TanfT1Factory.create() - - def test_validate_food_stamps(self, record): - """Test cat3 validator for food stamps.""" - val = validators.if_then_validator( - condition_field_name='RECEIVES_FOOD_STAMPS', condition_function=validators.matches(1), - result_field_name='AMT_FOOD_STAMP_ASSISTANCE', result_function=validators.isLargerThan(0), - ) - record.RECEIVES_FOOD_STAMPS = 1 - record.AMT_FOOD_STAMP_ASSISTANCE = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='RECEIVES_FOOD_STAMPS', friendly_name='receives food stamps'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='AMT_FOOD_STAMP_ASSISTANCE', friendly_name='amt food stamps'), - ] - )) - assert result == (True, None, ['RECEIVES_FOOD_STAMPS', 'AMT_FOOD_STAMP_ASSISTANCE']) - - record.AMT_FOOD_STAMP_ASSISTANCE = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, 
type='string', - name='RECEIVES_FOOD_STAMPS', friendly_name='receives food stamps'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='AMT_FOOD_STAMP_ASSISTANCE', friendly_name='amt food stamps'), - ] - )) - assert result[0] is False - - def test_validate_subsidized_child_care(self, record): - """Test cat3 validator for subsidized child care.""" - val = validators.if_then_validator( - condition_field_name='RECEIVES_SUB_CC', condition_function=validators.notMatches(3), - result_field_name='AMT_SUB_CC', result_function=validators.isLargerThan(0), - ) - record.RECEIVES_SUB_CC = 4 - record.AMT_SUB_CC = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='RECEIVES_SUB_CC', friendly_name='receives sub cc'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='AMT_SUB_CC', friendly_name='amt sub cc'), - ] - )) - assert result == (True, None, ['RECEIVES_SUB_CC', 'AMT_SUB_CC']) - - record.RECEIVES_SUB_CC = 4 - record.AMT_SUB_CC = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='RECEIVES_SUB_CC', friendly_name='receives sub cc'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='AMT_SUB_CC', friendly_name='amt sub cc'), - ] - )) - assert result[0] is False - - def test_validate_cash_amount_and_nbr_months(self, record): - """Test cat3 validator for cash amount and number of months.""" - val = validators.if_then_validator( - condition_field_name='CASH_AMOUNT', condition_function=validators.isLargerThan(0), - result_field_name='NBR_MONTHS', result_function=validators.isLargerThan(0), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='CASH_AMOUNT', friendly_name='cash amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NBR_MONTHS', friendly_name='nbr months'), - ] - )) - assert result == (True, None, ['CASH_AMOUNT', 'NBR_MONTHS']) - - 
record.CASH_AMOUNT = 1 - record.NBR_MONTHS = -1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='CASH_AMOUNT', friendly_name='cash amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NBR_MONTHS', friendly_name='nbr months'), - ] - )) - assert result[0] is False - - def test_validate_child_care(self, record): - """Test cat3 validator for child care.""" - val = validators.if_then_validator( - condition_field_name='CC_AMOUNT', condition_function=validators.isLargerThan(0), - result_field_name='CHILDREN_COVERED', result_function=validators.isLargerThan(0), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='CC_AMOUNT', friendly_name='cc amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CHILDREN_COVERED', friendly_name='chldrn coverd'), - ] - )) - assert result == (True, None, ['CC_AMOUNT', 'CHILDREN_COVERED']) - - record.CC_AMOUNT = 1 - record.CHILDREN_COVERED = -1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='CC_AMOUNT', friendly_name='cc amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CHILDREN_COVERED', friendly_name='chldrn coverd'), - ] - )) - assert result[0] is False - - val = validators.if_then_validator( - condition_field_name='CC_AMOUNT', condition_function=validators.isLargerThan(0), - result_field_name='CC_NBR_MONTHS', result_function=validators.isLargerThan(0), - ) - record.CC_AMOUNT = 10 - record.CC_NBR_MONTHS = -1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='CC_AMOUNT', friendly_name='cc amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CC_NBR_MONTHS', friendly_name='cc nbr mnths'), - ] - )) - assert result[0] is False - - def test_validate_transportation(self, record): - """Test cat3 validator for transportation.""" - 
val = validators.if_then_validator( - condition_field_name='TRANSP_AMOUNT', condition_function=validators.isLargerThan(0), - result_field_name='TRANSP_NBR_MONTHS', result_function=validators.isLargerThan(0), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='TRANSP_AMOUNT', friendly_name='transp amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='TRANSP_NBR_MONTHS', friendly_name='transp nbr months'), - ] - )) - assert result == (True, None, ['TRANSP_AMOUNT', 'TRANSP_NBR_MONTHS']) - - record.TRANSP_AMOUNT = 1 - record.TRANSP_NBR_MONTHS = -1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='TRANSP_AMOUNT', friendly_name='transp amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='TRANSP_NBR_MONTHS', friendly_name='transp nbr months'), - ] - )) - assert result[0] is False - - def test_validate_transitional_services(self, record): - """Test cat3 validator for transitional services.""" - val = validators.if_then_validator( - condition_field_name='TRANSITION_SERVICES_AMOUNT', condition_function=validators.isLargerThan(0), - result_field_name='TRANSITION_NBR_MONTHS', result_function=validators.isLargerThan(0), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='TRANSITION_SERVICES_AMOUNT', friendly_name='transition serv amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='TRANSITION_NBR_MONTHS', friendly_name='transition nbr months'), - ] - )) - assert result == (True, None, ['TRANSITION_SERVICES_AMOUNT', 'TRANSITION_NBR_MONTHS']) - - record.TRANSITION_SERVICES_AMOUNT = 1 - record.TRANSITION_NBR_MONTHS = -1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='TRANSITION_SERVICES_AMOUNT', friendly_name='transition serv amt'), - Field(item=2, startIndex=2, endIndex=4, 
type='string', - name='TRANSITION_NBR_MONTHS', friendly_name='transition nbr months'), - ] - )) - assert result[0] is False - - def test_validate_other(self, record): - """Test cat3 validator for other.""" - val = validators.if_then_validator( - condition_field_name='OTHER_AMOUNT', condition_function=validators.isLargerThan(0), - result_field_name='OTHER_NBR_MONTHS', result_function=validators.isLargerThan(0), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='OTHER_AMOUNT', friendly_name='other amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='OTHER_NBR_MONTHS', friendly_name='other nbr months'), - ] - )) - assert result == (True, None, ['OTHER_AMOUNT', 'OTHER_NBR_MONTHS']) - - record.OTHER_AMOUNT = 1 - record.OTHER_NBR_MONTHS = -1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='OTHER_AMOUNT', friendly_name='other amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='OTHER_NBR_MONTHS', friendly_name='other nbr months'), - ] - )) - assert result[0] is False - - def test_validate_reasons_for_amount_of_assistance_reductions(self, record): - """Test cat3 validator for assistance reductions.""" - val = validators.if_then_validator( - condition_field_name='SANC_REDUCTION_AMT', condition_function=validators.isLargerThan(0), - result_field_name='WORK_REQ_SANCTION', result_function=validators.oneOf((1, 2)), - ) - record.SANC_REDUCTION_AMT = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='SANC_REDUCTION_AMT', friendly_name='sanc reduction amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_REQ_SANCTION', friendly_name='work req sanction'), - ] - )) - assert result == (True, None, ['SANC_REDUCTION_AMT', 'WORK_REQ_SANCTION']) - - record.SANC_REDUCTION_AMT = 10 - record.WORK_REQ_SANCTION = -1 - result = val(record, RowSchema( - 
fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='SANC_REDUCTION_AMT', friendly_name='sanc reduction amt'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_REQ_SANCTION', friendly_name='work req sanction'), - ] - )) - assert result[0] is False - - def test_validate_sum(self, record): - """Test cat3 validator for sum of cash fields.""" - val = validators.sumIsLarger(("AMT_FOOD_STAMP_ASSISTANCE", "AMT_SUB_CC", "CC_AMOUNT", "TRANSP_AMOUNT", - "TRANSITION_SERVICES_AMOUNT", "OTHER_AMOUNT"), 0) - result = val(record, RowSchema()) - assert result == (True, None, ['AMT_FOOD_STAMP_ASSISTANCE', 'AMT_SUB_CC', 'CC_AMOUNT', 'TRANSP_AMOUNT', - 'TRANSITION_SERVICES_AMOUNT', 'OTHER_AMOUNT']) - - record.AMT_FOOD_STAMP_ASSISTANCE = 0 - record.AMT_SUB_CC = 0 - record.CC_AMOUNT = 0 - record.TRANSP_AMOUNT = 0 - record.TRANSITION_SERVICES_AMOUNT = 0 - record.OTHER_AMOUNT = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='AMT_FOOD_STAMP_ASSISTANCE', friendly_name='amt food stamp assis'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='AMT_SUB_CC', friendly_name='amt sub cc'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='CC_AMOUNT', friendly_name='cc amt'), - Field(item=4, startIndex=5, endIndex=6, type='string', - name='TRANSP_AMOUNT', friendly_name='transp amt'), - Field(item=5, startIndex=6, endIndex=7, type='string', - name='TRANSITION_SERVICES_AMOUNT', friendly_name='transition serv amt'), - Field(item=6, startIndex=7, endIndex=8, type='string', - name='OTHER_AMOUNT', friendly_name='other amt'), - ] - )) - assert result[0] is False - - -class TestT2Cat3Validators(TestCat3ValidatorsBase): - """Test category three validators for TANF T2 records.""" - - @pytest.fixture - def record(self): - """Override default record with TANF T2 record.""" - return TanfT2Factory.create() - - def test_validate_ssn(self, record): - """Test cat3 validator for 
social security number.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2)), - result_field_name='SSN', result_function=validators.notOneOf(("000000000", "111111111", "222222222", - "333333333", "444444444", "555555555", - "666666666", "777777777", "888888888", - "999999999")), - ) - record.SSN = "999989999" - record.FAMILY_AFFILIATION = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='ssn'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'SSN']) - - record.FAMILY_AFFILIATION = 1 - record.SSN = "999999999" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='ssn'), - ] - )) - assert result[0] is False - - def test_validate_race_ethnicity(self, record): - """Test cat3 validator for race/ethnicity.""" - races = ["RACE_HISPANIC", "RACE_AMER_INDIAN", "RACE_ASIAN", "RACE_BLACK", "RACE_HAWAIIAN", "RACE_WHITE"] - record.FAMILY_AFFILIATION = 1 - for race in races: - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2, 3)), - result_field_name=race, result_function=validators.isInLimits(1, 2), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='race'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', race]) - - record.FAMILY_AFFILIATION = 0 - for race in races: - val = validators.if_then_validator( - 
condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2, 3)), - result_field_name=race, result_function=validators.isInLimits(1, 2) - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='race'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', race]) - - def test_validate_marital_status(self, record): - """Test cat3 validator for marital status.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='MARITAL_STATUS', result_function=validators.isInLimits(1, 5), - ) - record.FAMILY_AFFILIATION = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='MARITAL_STATUS', friendly_name='married?'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'MARITAL_STATUS']) - - record.FAMILY_AFFILIATION = 3 - record.MARITAL_STATUS = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='MARITAL_STATUS', friendly_name='married?'), - ] - )) - assert result[0] is False - - def test_validate_parent_with_minor(self, record): - """Test cat3 validator for parent with a minor child.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='PARENT_MINOR_CHILD', result_function=validators.isInLimits(1, 3), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - 
name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='PARENT_MINOR_CHILD', friendly_name='parent minor child'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'PARENT_MINOR_CHILD']) - - record.PARENT_MINOR_CHILD = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='PARENT_MINOR_CHILD', friendly_name='parent minor child'), - ] - )) - assert result[0] is False - - def test_validate_education_level(self, record): - """Test cat3 validator for education level.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2, 3)), - result_field_name='EDUCATION_LEVEL', result_function=validators.oneOf(("01", "02", "03", "04", - "05", "06", "07", "08", - "09", "10", "11", "12", - "13", "14", "15", "16", - "98", "99")), - ) - record.FAMILY_AFFILIATION = 3 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='education level'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'EDUCATION_LEVEL']) - - record.FAMILY_AFFILIATION = 1 - record.EDUCATION_LEVEL = "00" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='education level'), - ] - )) - assert result[0] is False - - def test_validate_citizenship(self, record): - """Test cat3 validator for citizenship.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', 
condition_function=validators.matches(1), - result_field_name='CITIZENSHIP_STATUS', result_function=validators.oneOf((1, 2)), - ) - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='citizenship status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS']) - - record.FAMILY_AFFILIATION = 1 - record.CITIZENSHIP_STATUS = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='citizenship status'), - ] - )) - assert result[0] is False - - def test_validate_cooperation_with_child_support(self, record): - """Test cat3 validator for cooperation with child support.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='COOPERATION_CHILD_SUPPORT', result_function=validators.oneOf((1, 2, 9)), - ) - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='COOPERATION_CHILD_SUPPORT', friendly_name='cooperation child support'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'COOPERATION_CHILD_SUPPORT']) - - record.FAMILY_AFFILIATION = 1 - record.COOPERATION_CHILD_SUPPORT = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - 
name='COOPERATION_CHILD_SUPPORT', friendly_name='cooperation child support'), - ] - )) - assert result[0] is False - - def test_validate_employment_status(self, record): - """Test cat3 validator for employment status.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='EMPLOYMENT_STATUS', result_function=validators.isInLimits(1, 3), - ) - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EMPLOYMENT_STATUS', friendly_name='employment status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'EMPLOYMENT_STATUS']) - - record.FAMILY_AFFILIATION = 3 - record.EMPLOYMENT_STATUS = 4 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EMPLOYMENT_STATUS', friendly_name='employment status'), - ] - )) - assert result[0] is False - - def test_validate_work_eligible_indicator(self, record): - """Test cat3 validator for work eligibility.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2)), - result_field_name='WORK_ELIGIBLE_INDICATOR', result_function=validators.or_validators( - validators.isInStringRange(1, 9), - validators.matches('12') - ), - ) - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_ELIGIBLE_INDICATOR', friendly_name='work eligible indicator'), - ] - )) - assert result == (True, None, 
['FAMILY_AFFILIATION', 'WORK_ELIGIBLE_INDICATOR']) - - record.FAMILY_AFFILIATION = 1 - record.WORK_ELIGIBLE_INDICATOR = "00" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_ELIGIBLE_INDICATOR', friendly_name='work eligible indicator'), - ] - )) - assert result[0] is False - - def test_validate_work_participation(self, record): - """Test cat3 validator for work participation.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2)), - result_field_name='WORK_PART_STATUS', result_function=validators.oneOf(['01', '02', '05', '07', - '09', '15', '17', '18', - '19', '99']), - ) - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_PART_STATUS', friendly_name='work part status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'WORK_PART_STATUS']) - - record.FAMILY_AFFILIATION = 2 - record.WORK_PART_STATUS = "04" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_PART_STATUS', friendly_name='work part status'), - ] - )) - assert result[0] is False - - val = validators.if_then_validator( - condition_field_name='WORK_ELIGIBLE_INDICATOR', - condition_function=validators.isInStringRange(1, 5), - result_field_name='WORK_PART_STATUS', - result_function=validators.notMatches('99'), - ) - record.WORK_PART_STATUS = "99" - record.WORK_ELIGIBLE_INDICATOR = "01" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, 
endIndex=2, type='string', - name='WORK_ELIGIBLE_INDICATOR', friendly_name='work eligible indicator'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='WORK_PART_STATUS', friendly_name='work part status'), - ] - )) - assert result[0] is False - - -class TestT3Cat3Validators(TestCat3ValidatorsBase): - """Test category three validators for TANF T3 records.""" - - @pytest.fixture - def record(self): - """Override default record with TANF T3 record.""" - return TanfT3Factory.create() - - def test_validate_ssn(self, record): - """Test cat3 validator for relationship to head of household.""" - record.FAMILY_AFFILIATION = 1 - record.SSN = "199199991" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='SSN', result_function=validators.notOneOf(("999999999", "000000000")), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'SSN']) - - record.FAMILY_AFFILIATION = 1 - record.SSN = "999999999" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - ] - )) - assert result[0] is False - - def test_validate_t3_race_ethnicity(self, record): - """Test cat3 validator for race/ethnicity.""" - races = ["RACE_HISPANIC", "RACE_AMER_INDIAN", "RACE_ASIAN", "RACE_BLACK", "RACE_HAWAIIAN", "RACE_WHITE"] - record.FAMILY_AFFILIATION = 1 - for race in races: - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2)), - result_field_name=race, 
result_function=validators.oneOf((1, 2)), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='race'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', race]) - - record.FAMILY_AFFILIATION = 0 - for race in races: - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2)), - result_field_name=race, result_function=validators.oneOf((1, 2)), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='race'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', race]) - - def test_validate_relationship_hoh(self, record): - """Test cat3 validator for relationship to head of household.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.oneOf((1, 2)), - result_field_name='RELATIONSHIP_HOH', result_function=validators.isInStringRange(4, 9), - ) - record.FAMILY_AFFILIATION = 0 - record.RELATIONSHIP_HOH = "04" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='RELATIONSHIP_HOH', friendly_name='relationship hoh'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'RELATIONSHIP_HOH']) - - record.FAMILY_AFFILIATION = 1 - record.RELATIONSHIP_HOH = "01" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, 
type='string', - name='RELATIONSHIP_HOH', friendly_name='relationship hoh'), - ] - )) - assert result[0] is False - - def test_validate_t3_education_level(self, record): - """Test cat3 validator for education level.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='EDUCATION_LEVEL', result_function=validators.notMatches("99"), - ) - record.FAMILY_AFFILIATION = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='ed lev'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'EDUCATION_LEVEL']) - - record.FAMILY_AFFILIATION = 1 - record.EDUCATION_LEVEL = "99" - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='ed lev'), - ] - )) - assert result[0] is False - - def test_validate_t3_citizenship(self, record): - """Test cat3 validator for citizenship.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='CITIZENSHIP_STATUS', result_function=validators.oneOf((1, 2)), - ) - record.FAMILY_AFFILIATION = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='cit stat'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS']) - - record.FAMILY_AFFILIATION = 1 - record.CITIZENSHIP_STATUS = 3 - result = val(record, RowSchema( - fields=[ - 
Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='cit stat'), - ] - )) - assert result[0] is False - - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(2), - result_field_name='CITIZENSHIP_STATUS', result_function=validators.oneOf((1, 2, 9)), - ) - record.FAMILY_AFFILIATION = 2 - record.CITIZENSHIP_STATUS = 3 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='cit stat'), - ] - )) - assert result[0] is False - - -class TestT5Cat3Validators(TestCat3ValidatorsBase): - """Test category three validators for TANF T5 records.""" - - @pytest.fixture - def record(self): - """Override default record with TANF T5 record.""" - return TanfT5Factory.create() - - def test_validate_ssn(self, record): - """Test cat3 validator for SSN.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.notMatches(1), - result_field_name='SSN', result_function=validators.isNumber() - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'SSN']) - - record.SSN = "abc" - record.FAMILY_AFFILIATION = 2 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', 
friendly_name='social'), - ] - )) - assert result[0] is False - - def test_validate_ssn_citizenship(self, record): - """Test cat3 validator for SSN/citizenship.""" - val = validators.validate__FAM_AFF__SSN() - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='CITIZENSHIP_STATUS', friendly_name='cit stat'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS', 'SSN']) - - record.FAMILY_AFFILIATION = 2 - record.SSN = "000000000" - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='CITIZENSHIP_STATUS', friendly_name='cit stat'), - ] - )) - assert result[0] is False - - def test_validate_race_ethnicity(self, record): - """Test cat3 validator for race/ethnicity.""" - races = ["RACE_HISPANIC", "RACE_AMER_INDIAN", "RACE_ASIAN", "RACE_BLACK", "RACE_HAWAIIAN", "RACE_WHITE"] - record.FAMILY_AFFILIATION = 1 - for race in races: - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name=race, result_function=validators.isInLimits(1, 2) - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='social'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', race]) - - record.FAMILY_AFFILIATION = 1 - record.RACE_HISPANIC = 0 - 
record.RACE_AMER_INDIAN = 0 - record.RACE_ASIAN = 0 - record.RACE_BLACK = 0 - record.RACE_HAWAIIAN = 0 - record.RACE_WHITE = 0 - for race in races: - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name=race, result_function=validators.isInLimits(1, 2) - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='social'), - ] - )) - assert result[0] is False - - def test_validate_marital_status(self, record): - """Test cat3 validator for marital status.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='MARITAL_STATUS', result_function=validators.isInLimits(0, 5) - ) - - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='MARITAL_STATUS', friendly_name='marital status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'MARITAL_STATUS']) - - record.FAMILY_AFFILIATION = 2 - record.MARITAL_STATUS = 6 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='MARITAL_STATUS', friendly_name='marital status'), - ] - )) - assert result[0] is False - - def test_validate_parent_minor(self, record): - """Test cat3 validator for parent with minor.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 2), - 
result_field_name='PARENT_MINOR_CHILD', result_function=validators.isInLimits(1, 3) - ) - - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='PARENT_MINOR_CHILD', friendly_name='parent minor child'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'PARENT_MINOR_CHILD']) - - record.FAMILY_AFFILIATION = 2 - record.PARENT_MINOR_CHILD = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='PARENT_MINOR_CHILD', friendly_name='parent minor child'), - ] - )) - assert result[0] is False - - def test_validate_education(self, record): - """Test cat3 validator for education level.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='EDUCATION_LEVEL', result_function=validators.or_validators( - validators.isInStringRange(1, 16), - validators.isInStringRange(98, 99) - ) - ) - - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='education level'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'EDUCATION_LEVEL']) - - record.FAMILY_AFFILIATION = 2 - record.EDUCATION_LEVEL = "0" - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='education 
level'), - ] - )) - assert result[0] is False - - def test_validate_citizenship_status(self, record): - """Test cat3 validator for citizenship status.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='CITIZENSHIP_STATUS', result_function=validators.isInLimits(1, 2) - ) - - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='citizenship status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS']) - - record.FAMILY_AFFILIATION = 1 - record.CITIZENSHIP_STATUS = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='citizenship status'), - ] - )) - assert result[0] is False - - def test_validate_oasdi_insurance(self, record): - """Test cat3 validator for OASDI insurance.""" - val = validators.if_then_validator( - condition_field_name='DATE_OF_BIRTH', condition_function=validators.olderThan(18), - result_field_name='REC_OASDI_INSURANCE', result_function=validators.isInLimits(1, 2) - ) - - record.DATE_OF_BIRTH = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='DATE_OF_BIRTH', friendly_name='dob'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_OASDI_INSURANCE', friendly_name='rec oasdi insurance'), - ] - )) - assert result == (True, None, ['DATE_OF_BIRTH', 'REC_OASDI_INSURANCE']) - - record.DATE_OF_BIRTH = 200001 - record.REC_OASDI_INSURANCE = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, 
startIndex=0, endIndex=2, type='string', - name='DATE_OF_BIRTH', friendly_name='dob'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_OASDI_INSURANCE', friendly_name='rec oasdi insurance'), - ] - )) - assert result[0] is False - - def test_validate_federal_disability(self, record): - """Test cat3 validator for federal disability.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='REC_FEDERAL_DISABILITY', result_function=validators.isInLimits(1, 2) - ) - - record.FAMILY_AFFILIATION = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_FEDERAL_DISABILITY', friendly_name='rec fed disability'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'REC_FEDERAL_DISABILITY']) - - record.FAMILY_AFFILIATION = 1 - record.REC_FEDERAL_DISABILITY = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_FEDERAL_DISABILITY', friendly_name='rec fed disability'), - ] - )) - assert result[0] is False - - -class TestT6Cat3Validators(TestCat3ValidatorsBase): - """Test category three validators for TANF T6 records.""" - - @pytest.fixture - def record(self): - """Override default record with TANF T6 record.""" - return TanfT6Factory.create() - - def test_sum_of_applications(self, record): - """Test cat3 validator for sum of applications.""" - val = validators.sumIsEqual("NUM_APPLICATIONS", ["NUM_APPROVED", "NUM_DENIED"]) - - record.NUM_APPLICATIONS = 2 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='NUM_APPLICATIONS', friendly_name='fam affil'), - 
Field(item=2, startIndex=2, endIndex=4, type='string', - name='NUM_APPROVED', friendly_name='num approved'), - Field(item=2, startIndex=4, endIndex=5, type='string', - name='NUM_DENIED', friendly_name='num denied'), - ] - )) - - assert result == (True, None, ['NUM_APPLICATIONS', 'NUM_APPROVED', 'NUM_DENIED']) - - record.NUM_APPLICATIONS = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='NUM_APPLICATIONS', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NUM_APPROVED', friendly_name='num approved'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='NUM_DENIED', friendly_name='num denied'), - ] - )) - - assert result[0] is False - - def test_sum_of_families(self, record): - """Test cat3 validator for sum of families.""" - val = validators.sumIsEqual("NUM_FAMILIES", ["NUM_2_PARENTS", "NUM_1_PARENTS", "NUM_NO_PARENTS"]) - - record.NUM_FAMILIES = 3 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='NUM_FAMILIES', friendly_name='num fam'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NUM_2_PARENTS', friendly_name='num 2 parent'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='NUM_1_PARENTS', friendly_name='num 2 parent'), - Field(item=4, startIndex=5, endIndex=6, type='string', - name='NUM_NO_PARENTS', friendly_name='num 0 parent'), - ] - )) - - assert result == (True, None, ['NUM_FAMILIES', 'NUM_2_PARENTS', 'NUM_1_PARENTS', 'NUM_NO_PARENTS']) - - record.NUM_FAMILIES = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='NUM_FAMILIES', friendly_name='num fam'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NUM_2_PARENTS', friendly_name='num 2 parent'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='NUM_1_PARENTS', friendly_name='num 2 parent'), - Field(item=4, 
startIndex=5, endIndex=6, type='string', - name='NUM_NO_PARENTS', friendly_name='num 0 parent'), - ] - )) - - assert result[0] is False - - def test_sum_of_recipients(self, record): - """Test cat3 validator for sum of recipients.""" - val = validators.sumIsEqual("NUM_RECIPIENTS", ["NUM_ADULT_RECIPIENTS", "NUM_CHILD_RECIPIENTS"]) - - record.NUM_RECIPIENTS = 2 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='NUM_RECIPIENTS', friendly_name='num recip'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NUM_ADULT_RECIPIENTS', friendly_name='num adult recip'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='NUM_CHILD_RECIPIENTS', friendly_name='num child recip'), - ] - )) - - assert result == (True, None, ['NUM_RECIPIENTS', 'NUM_ADULT_RECIPIENTS', 'NUM_CHILD_RECIPIENTS']) - - record.NUM_RECIPIENTS = 1 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='NUM_RECIPIENTS', friendly_name='num recip'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='NUM_ADULT_RECIPIENTS', friendly_name='num adult recip'), - Field(item=3, startIndex=4, endIndex=5, type='string', - name='NUM_CHILD_RECIPIENTS', friendly_name='num child recip'), - ] - )) - - assert result[0] is False - -class TestM5Cat3Validators(TestCat3ValidatorsBase): - """Test category three validators for TANF T6 records.""" - - @pytest.fixture - def record(self): - """Override default record with TANF T6 record.""" - return SSPM5Factory.create() - - def test_fam_affil_ssn(self, record): - """Test cat3 validator for family affiliation and ssn.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='SSN', result_function=validators.validateSSN(), - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - 
name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - ] - )) - assert result == (True, None, ["FAMILY_AFFILIATION", "SSN"]) - - record.SSN = '111111111' - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='SSN', friendly_name='social'), - ] - )) - - assert result[0] is False - - def test_validate_race_ethnicity(self, record): - """Test cat3 validator for race/ethnicity.""" - races = ["RACE_HISPANIC", "RACE_AMER_INDIAN", "RACE_ASIAN", "RACE_BLACK", "RACE_HAWAIIAN", "RACE_WHITE"] - for race in races: - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name=race, result_function=validators.isInLimits(1, 2), - ) - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name=race, friendly_name='social'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', race]) - - def test_fam_affil_marital_stat(self, record): - """Test cat3 validator for family affiliation, and marital status.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='MARITAL_STATUS', result_function=validators.isInLimits(1, 5), - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='MARITAL_STATUS', friendly_name='marital status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 
'MARITAL_STATUS']) - - record.MARITAL_STATUS = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='MARITAL_STATUS', friendly_name='marital status'), - ] - )) - assert result[0] is False - - def test_fam_affil_parent_with_minor(self, record): - """Test cat3 validator for family affiliation, and parent with minor child.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 2), - result_field_name='PARENT_MINOR_CHILD', result_function=validators.isInLimits(1, 3), - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='PARENT_MINOR_CHILD', friendly_name='parent minor child'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'PARENT_MINOR_CHILD']) - - record.PARENT_MINOR_CHILD = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='PARENT_MINOR_CHILD', friendly_name='parent minor child'), - ] - )) - assert result[0] is False - - def test_fam_affil_ed_level(self, record): - """Test cat3 validator for family affiliation, and education level.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.isInLimits(1, 3), - result_field_name='EDUCATION_LEVEL', result_function=validators.or_validators( - validators.isInStringRange(1, 16), validators.isInStringRange(98, 99)), - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', 
friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='education level'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'EDUCATION_LEVEL']) - - record.EDUCATION_LEVEL = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='EDUCATION_LEVEL', friendly_name='education level'), - ] - )) - assert result[0] is False - - def test_fam_affil_citz_stat(self, record): - """Test cat3 validator for family affiliation, and citizenship status.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='CITIZENSHIP_STATUS', result_function=validators.isInLimits(1, 3), - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='citizenship status'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS']) - - record.CITIZENSHIP_STATUS = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='CITIZENSHIP_STATUS', friendly_name='citizenship status'), - ] - )) - assert result[0] is False - - def test_dob_oasdi_insur(self, record): - """Test cat3 validator for dob, and REC_OASDI_INSURANCE.""" - val = validators.if_then_validator( - condition_field_name='DATE_OF_BIRTH', condition_function=validators.olderThan(18), - result_field_name='REC_OASDI_INSURANCE', result_function=validators.isInLimits(1, 2), - ) - - result = val(record, RowSchema( - 
fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='DATE_OF_BIRTH', friendly_name='dob'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_OASDI_INSURANCE', friendly_name='rec oasdi insurance'), - ] - )) - assert result == (True, None, ['DATE_OF_BIRTH', 'REC_OASDI_INSURANCE']) - - record.REC_OASDI_INSURANCE = 0 - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='DATE_OF_BIRTH', friendly_name='dob'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_OASDI_INSURANCE', friendly_name='rec oasdi insurance'), - ] - )) - assert result[0] is False - - def test_fam_affil_fed_disability(self, record): - """Test cat3 validator for family affiliation, and REC_FEDERAL_DISABILITY.""" - val = validators.if_then_validator( - condition_field_name='FAMILY_AFFILIATION', condition_function=validators.matches(1), - result_field_name='REC_FEDERAL_DISABILITY', result_function=validators.isInLimits(1, 2), - ) - - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_FEDERAL_DISABILITY', friendly_name='rec fed disability'), - ] - )) - assert result == (True, None, ['FAMILY_AFFILIATION', 'REC_FEDERAL_DISABILITY']) - - record.REC_FEDERAL_DISABILITY = 0 - result = val(record, RowSchema( - fields=[ - Field(item=1, startIndex=0, endIndex=2, type='string', - name='FAMILY_AFFILIATION', friendly_name='fam affil'), - Field(item=2, startIndex=2, endIndex=4, type='string', - name='REC_FEDERAL_DISABILITY', friendly_name='rec fed disability'), - ] - )) - assert result[0] is False - -def test_is_quiet_preparser_errors(): - """Test is_quiet_preparser_errors.""" - assert validators.is_quiet_preparser_errors(2, 4, 6)("#######") is True - assert validators.is_quiet_preparser_errors(2, 4, 6)("####1##") is False - assert 
validators.is_quiet_preparser_errors(4, 4, 6)("##1") is True - -def test_t3_m3_child_validator(): - """Test t3_m3_child_validator.""" - assert validators.t3_m3_child_validator(1)( - "4" * 61, None, "fake_friendly_name", 0 - ) == (True, None) - assert validators.t3_m3_child_validator(1)("12", None, "fake_friendly_name", 0) == ( - False, - "The first child record is too short at 2 characters and must be at least 60 characters.", - ) diff --git a/tdrs-backend/tdpservice/parsers/util.py b/tdrs-backend/tdpservice/parsers/util.py index 72a2850df..69a53dadd 100644 --- a/tdrs-backend/tdpservice/parsers/util.py +++ b/tdrs-backend/tdpservice/parsers/util.py @@ -1,7 +1,9 @@ """Utility file for functions shared between all parsers even preparser.""" from .models import ParserError +from django.contrib.admin.models import ADDITION from django.contrib.contenttypes.models import ContentType from tdpservice.data_files.models import DataFile +from tdpservice.core.utils import log from datetime import datetime from pathlib import Path import logging @@ -35,6 +37,8 @@ def generate_parser_error(datafile, line_number, schema, error_category, error_m } } + field = fields[-1] # if multiple fields, result field is last + return ParserError( file=datafile, row_number=line_number, @@ -298,3 +302,15 @@ def get_t1_t4_partial_hash_members(): def get_t2_t3_t5_partial_hash_members(): """Return field names used to generate t2/t3/t5 partial hashes.""" return ["RecordType", "RPT_MONTH_YEAR", "CASE_NUMBER", "FAMILY_AFFILIATION", "DATE_OF_BIRTH", "SSN"] + +def get_record_value_by_field_name(record, field_name): + """Return the value of a record for a given field name, accounting for the generic record type.""" + return record.get(field_name, None) if type(record) is dict else getattr(record, field_name, None) + +def log_parser_exception(datafile, error_msg, level): + """Log to DAC and console on parser exception.""" + context = {'user_id': datafile.user.pk, + 'action_flag': ADDITION, + 'object_repr': 
f"Datafile id: {datafile.pk}; year: {datafile.year}, quarter: {datafile.quarter}", + "object_id": datafile} + log(error_msg, context, level) diff --git a/tdrs-backend/tdpservice/parsers/validators.py b/tdrs-backend/tdpservice/parsers/validators.py deleted file mode 100644 index 2d4ac7b34..000000000 --- a/tdrs-backend/tdpservice/parsers/validators.py +++ /dev/null @@ -1,815 +0,0 @@ -"""Generic parser validator functions for use in schema definitions.""" - -import datetime -import logging -from dataclasses import dataclass -from typing import Any -# from tdpservice.parsers.row_schema import RowSchema -from tdpservice.parsers.models import ParserErrorCategoryChoices -from tdpservice.parsers.util import fiscal_to_calendar, year_month_to_year_quarter, clean_options_string - -logger = logging.getLogger(__name__) - - -def value_is_empty(value, length, extra_vals={}): - """Handle 'empty' values as field inputs.""" - # TODO: have to build mixed type handling for value - empty_values = { - '', - ' '*length, # ' ' - '#'*length, # '#####' - '_'*length, # '_____' - } - - empty_values = empty_values.union(extra_vals) - - return value is None or value in empty_values - - -@dataclass -class ValidationErrorArgs: - """Dataclass for args to `make_validator` `error_func`s.""" - - value: Any - row_schema: object # RowSchema causes circular import - friendly_name: str - item_num: str - error_context_format: str = 'prefix' - - -def format_error_context(eargs: ValidationErrorArgs): - """Format the error message for consistency across cat2 validators.""" - match eargs.error_context_format: - case 'inline': - return f'Item {eargs.item_num} ({eargs.friendly_name})' - - case 'prefix' | _: - return f'{eargs.row_schema.record_type} Item {eargs.item_num} ({eargs.friendly_name}):' - - -# higher order validator functions - - -def make_validator(validator_func, error_func): - """Return a function accepting a value input and returning (bool, string) to represent validation state.""" - def 
validator(value, row_schema=None, friendly_name=None, item_num=None, error_context_format='prefix'): - eargs = ValidationErrorArgs( - value=value, - row_schema=row_schema, - friendly_name=friendly_name, - item_num=item_num, - error_context_format=error_context_format - ) - - try: - if validator_func(value): - return (True, None) - return (False, error_func(eargs)) - except Exception as e: - logger.debug(f"Caught exception in validator. Exception: {e}") - return (False, error_func(eargs)) - return validator - - -def or_validators(*args, **kwargs): - """Return a validator that is true only if one of the validators is true.""" - return ( - lambda value, row_schema, friendly_name, - item_num, error_context_format='inline': (True, None) - if any([ - validator(value, row_schema, friendly_name, item_num, error_context_format)[0] for validator in args - ]) - else (False, " or ".join([ - validator(value, row_schema, friendly_name, item_num, error_context_format)[1] for validator in args - ])) - ) - - -def and_validators(validator1, validator2): - """Return a validator that is true only if both validators are true.""" - return ( - lambda value, row_schema, friendly_name, item_num: (True, None) - if (validator1(value, row_schema, friendly_name, item_num, 'inline')[0] - and validator2(value, row_schema, friendly_name, item_num, 'inline')[0]) - else ( - False, - (validator1(value, row_schema, friendly_name, item_num, 'inline')[1]) - if validator1(value, row_schema, friendly_name, item_num, 'inline')[1] is not None - else "" + " and " + validator2(value)[1] - if validator2(value, row_schema, friendly_name, item_num, 'inline')[1] is not None - else "", - ) - ) - -def or_priority_validators(validators=[]): - """Return a validator that is true based on a priority of validators. 
- - validators: ordered list of validators to be checked - """ - def or_priority_validators_func(value, rows_schema, friendly_name=None, item_num=None): - for validator in validators: - if not validator(value, rows_schema, friendly_name, item_num, 'inline')[0]: - return (False, validator(value, rows_schema, - friendly_name, item_num, 'inline')[1]) - return (True, None) - - return or_priority_validators_func - - -def extended_and_validators(*args, **kwargs): - """Return a validator that is true only if all validators are true.""" - def returned_func(value, row_schema, friendly_name, item_num): - if all([validator(value, row_schema, friendly_name, item_num, 'inline')[0] for validator in args]): - return (True, None) - else: - return (False, "".join( - [ - " and " + validator(value, row_schema, friendly_name, item_num, 'inline')[1] - if validator(value, row_schema, friendly_name, item_num, 'inline')[0] else "" - for validator in args - ] - )) - return returned_func - - -def if_then_validator( - condition_field_name, condition_function, result_field_name, result_function -): - """Return second validation if the first validator is true. 
- - :param condition_field: function that returns (bool, string) to represent validation state - :param condition_function: function that returns (bool, string) to represent validation state - :param result_field: function that returns (bool, string) to represent validation state - :param result_function: function that returns (bool, string) to represent validation state - """ - - def if_then_validator_func(value, row_schema): - value1 = ( - value[condition_field_name] - if type(value) is dict - else getattr(value, condition_field_name) - ) - value2 = ( - value[result_field_name] if type(value) is dict else getattr(value, result_field_name) - ) - - condition_field = row_schema.get_field_by_name(condition_field_name) - result_field = row_schema.get_field_by_name(result_field_name) - - validator1_result = condition_function( - value1, - row_schema, - condition_field.friendly_name, - condition_field.item, - 'inline' - ) - validator2_result = result_function( - value2, - row_schema, - result_field.friendly_name, - result_field.item, - 'inline' - ) - - if not validator1_result[0]: - returned_value = (True, None, [condition_field_name, result_field_name]) - else: - if not validator2_result[0]: - - # center of error message - if validator1_result[1] is not None: - center_error = validator1_result[1] - else: - center_error = f":{value1} validator1 passed" - - # ending of error message - if validator2_result[1] is not None: - ending_error = validator2_result[1] - else: - ending_error = "validator2 passed" - - error_message = (f"if {condition_field_name} " + (center_error) + - " then " + ending_error) - else: - error_message = None - - returned_value = (validator2_result[0], error_message, [condition_field_name, result_field_name]) - - return returned_value - - return lambda value, row_schema: if_then_validator_func(value, row_schema) - - -def sumIsEqual(condition_field, sum_fields=[]): - """Validate that the sum of the sum_fields equals the condition_field.""" - - def 
sumIsEqualFunc(value, row_schema): - sum = 0 - for field in sum_fields: - val = value[field] if type(value) is dict else getattr(value, field) - sum += 0 if val is None else val - - condition_val = ( - value[condition_field] - if type(value) is dict - else getattr(value, condition_field) - ) - fields = [condition_field] - fields.extend(sum_fields) - return ( - (True, None, fields) - if sum == condition_val - else ( - False, - f"{row_schema.record_type}: The sum of {sum_fields} does not equal {condition_field}.", - fields, - ) - ) - - return sumIsEqualFunc - - -def field_year_month_with_header_year_quarter(): - """Validate that the field year and month match the header year and quarter.""" - def validate_reporting_month_year_fields_header( - line, row_schema, friendly_name, item_num, error_context_format=None): - - field_month_year = row_schema.get_field_values_by_names(line, ['RPT_MONTH_YEAR']).get('RPT_MONTH_YEAR') - df_quarter = row_schema.datafile.quarter - df_year = row_schema.datafile.year - - # get reporting month year from header - field_year, field_quarter = year_month_to_year_quarter(f"{field_month_year}") - file_calendar_year, file_calendar_qtr = fiscal_to_calendar(df_year, f"{df_quarter}") - return (True, None) if str(file_calendar_year) == str(field_year) and file_calendar_qtr == field_quarter else ( - False, f"{row_schema.record_type}: Reporting month year {field_month_year} " + - f"does not match file reporting year:{df_year}, quarter:{df_quarter}.", - ) - - return validate_reporting_month_year_fields_header - - -def sumIsLarger(fields, val): - """Validate that the sum of the fields is larger than val.""" - - def sumIsLargerFunc(value, row_schema): - sum = 0 - for field in fields: - temp_val = value[field] if type(value) is dict else getattr(value, field) - sum += 0 if temp_val is None else temp_val - - return ( - (True, None, [field for field in fields]) - if sum > val - else ( - False, - f"{row_schema.record_type}: The sum of {fields} is not larger 
than {val}.", - [field for field in fields], - ) - ) - - return sumIsLargerFunc - - -def recordHasLength(length): - """Validate that value (string or array) has a length matching length param.""" - return make_validator( - lambda value: len(value) == length, - lambda eargs: f"{eargs.row_schema.record_type}: record length is " - f"{len(eargs.value)} characters but must be {length}.", - ) - - -def recordHasLengthBetween(lower, upper, error_func=None): - """Validate that value (string or array) has a length matching length param.""" - return make_validator( - lambda value: len(value) >= lower and len(value) <= upper, - lambda eargs: error_func(eargs.value, lower, upper) - if error_func - else - f"{eargs.row_schema.record_type}: record length of {len(eargs.value)} " - f"characters is not in the range [{lower}, {upper}].", - ) - - -def caseNumberNotEmpty(start=0, end=None): - """Validate that string value isn't only blanks.""" - return make_validator( - lambda value: not _is_empty(value, start, end), - lambda eargs: f'{eargs.row_schema.record_type}: Case number {str(eargs.value)} cannot contain blanks.' - ) - - -def calendarQuarterIsValid(start=0, end=None): - """Validate that the calendar quarter value is valid.""" - return make_validator( - lambda value: value[start:end].isnumeric() and int(value[start:end - 1]) >= 2020 - and int(value[end - 1:end]) > 0 and int(value[end - 1:end]) < 5, - lambda eargs: f"{eargs.row_schema.record_type}: {eargs.value[start:end]} is invalid. 
" - "Calendar Quarter must be a numeric representing the Calendar Year and Quarter formatted as YYYYQ", - ) - - -# generic validators - - -def matches(option, error_func=None): - """Validate that value is equal to option.""" - return make_validator( - lambda value: value == option, - lambda eargs: error_func(eargs) - if error_func - else f"{format_error_context(eargs)} {eargs.value} does not match {option}.", - ) - - -def notMatches(option): - """Validate that value is not equal to option.""" - return make_validator( - lambda value: value != option, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} matches {option}." - ) - - -def oneOf(options=[]): - """Validate that value does not exist in the provided options array.""" - """ - accepts options as list of: string, int or string range ("3-20") - """ - - def check_option(value, options): - # split the option if it is a range and append the range to the options - for option in options: - if "-" in str(option): - start, end = option.split("-") - options.extend([i for i in range(int(start), int(end) + 1)]) - options.remove(option) - return value in options - - return make_validator( - lambda value: check_option(value, options), - lambda eargs: - f"{format_error_context(eargs)} {eargs.value} is not in {clean_options_string(options)}." - ) - - -def notOneOf(options=[]): - """Validate that value exists in the provided options array.""" - return make_validator( - lambda value: value not in options, - lambda eargs: - f"{format_error_context(eargs)} {eargs.value} is in {clean_options_string(options)}." 
- ) - - -def between(min, max): - """Validate value, when casted to int, is greater than min and less than max.""" - return make_validator( - lambda value: int(value) > min and int(value) < max, - lambda eargs: - f"{format_error_context(eargs)} {eargs.value} is not between {min} and {max}.", - ) - - -def fieldHasLength(length): - """Validate that the field value (string or array) has a length matching length param.""" - return make_validator( - lambda value: len(value) == length, - lambda eargs: - f"{eargs.row_schema.record_type} field length is {len(eargs.value)} characters but must be {length}.", - ) - - -def hasLengthGreaterThan(val, error_func=None): - """Validate that value (string or array) has a length greater than val.""" - return make_validator( - lambda value: len(value) >= val, - lambda eargs: - f"Value length {len(eargs.value)} is not greater than {val}.", - ) - - -def intHasLength(num_digits): - """Validate the number of digits in an integer.""" - return make_validator( - lambda value: sum(c.isdigit() for c in str(value)) == num_digits, - lambda eargs: - f"{format_error_context(eargs)} {eargs.value} does not have exactly {num_digits} digits.", - ) - - -def contains(substring): - """Validate that string value contains the given substring param.""" - return make_validator( - lambda value: value.find(substring) != -1, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} does not contain {substring}.", - ) - - -def startsWith(substring, error_func=None): - """Validate that string value starts with the given substring param.""" - return make_validator( - lambda value: value.startswith(substring), - lambda eargs: error_func(substring) - if error_func - else f"{format_error_context(eargs)} {eargs.value} does not start with {substring}.", - ) - - -def isNumber(): - """Validate that value can be casted to a number.""" - return make_validator( - lambda value: str(value).strip().isnumeric(), - lambda eargs: f"{format_error_context(eargs)} {eargs.value} 
is not a number." - ) - - -def isAlphaNumeric(): - """Validate that value is alphanumeric.""" - return make_validator( - lambda value: value.isalnum(), - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not alphanumeric." - ) - - -def isBlank(): - """Validate that string value is blank.""" - return make_validator( - lambda value: value.isspace(), - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not blank." - ) - - -def isInStringRange(lower, upper): - """Validate that string value is in a specific range.""" - return make_validator( - lambda value: int(value) >= lower and int(value) <= upper, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not in range [{lower}, {upper}].", - ) - - -def isStringLargerThan(val): - """Validate that string value is larger than val.""" - return make_validator( - lambda value: int(value) > val, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not larger than {val}.", - ) - - -def _is_empty(value, start, end): - end = end if end else len(str(value)) - vlen = end - start - subv = str(value)[start:end] - return value_is_empty(subv, vlen) or len(subv) < vlen - - -def notEmpty(start=0, end=None): - """Validate that string value isn't only blanks.""" - return make_validator( - lambda value: not _is_empty(value, start, end), - lambda eargs: - f'{format_error_context(eargs)} {str(eargs.value)} contains blanks ' - f'between positions {start} and {end if end else len(str(eargs.value))}.' - ) - - -def isEmpty(start=0, end=None): - """Validate that string value is only blanks.""" - return make_validator( - lambda value: _is_empty(value, start, end), - lambda eargs: - f'{format_error_context(eargs)} {eargs.value} is not blank ' - f'between positions {start} and {end if end else len(eargs.value)}.' 
- ) - - -def notZero(number_of_zeros=1): - """Validate that value is not zero.""" - return make_validator( - lambda value: value != "0" * number_of_zeros, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is zero." - ) - - -def isLargerThan(LowerBound): - """Validate that value is larger than the given value.""" - return make_validator( - lambda value: float(value) > LowerBound if value is not None else False, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not larger than {LowerBound}.", - ) - - -def isSmallerThan(UpperBound): - """Validate that value is smaller than the given value.""" - return make_validator( - lambda value: value < UpperBound, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not smaller than {UpperBound}.", - ) - - -def isLargerThanOrEqualTo(LowerBound): - """Validate that value is larger than the given value.""" - return make_validator( - lambda value: value >= LowerBound, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not larger than {LowerBound}.", - ) - - -def isSmallerThanOrEqualTo(UpperBound): - """Validate that value is smaller than the given value.""" - return make_validator( - lambda value: value <= UpperBound, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not smaller than {UpperBound}.", - ) - - -def isInLimits(LowerBound, UpperBound): - """Validate that value is in a range including the limits.""" - return make_validator( - lambda value: int(value) >= LowerBound and int(value) <= UpperBound, - lambda eargs: - f"{format_error_context(eargs)} {eargs.value} is not larger or equal " - f"to {LowerBound} and smaller or equal to {UpperBound}." 
- ) - -# custom validators - -def dateMonthIsValid(): - """Validate that in a monthyear combination, the month is a valid month.""" - return make_validator( - lambda value: int(str(value)[4:6]) in range(1, 13), - lambda eargs: f"{format_error_context(eargs)} {str(eargs.value)[4:6]} is not a valid month.", - ) - -def dateDayIsValid(): - """Validate that in a monthyearday combination, the day is a valid day.""" - return make_validator( - lambda value: int(str(value)[6:]) in range(1, 32), - lambda eargs: f"{format_error_context(eargs)} {str(eargs.value)[6:]} is not a valid day.", - ) - - -def olderThan(min_age): - """Validate that value is larger than min_age.""" - return make_validator( - lambda value: datetime.date.today().year - int(str(value)[:4]) > min_age, - lambda eargs: - f"{format_error_context(eargs)} {str(eargs.value)[:4]} must be less " - f"than or equal to {datetime.date.today().year - min_age} to meet the minimum age requirement." - ) - - -def dateYearIsLargerThan(year): - """Validate that in a monthyear combination, the year is larger than the given year.""" - return make_validator( - lambda value: int(str(value)[:4]) > year, - lambda eargs: f"{format_error_context(eargs)} Year {str(eargs.value)[:4]} must be larger than {year}.", - ) - - -def quarterIsValid(): - """Validate in a year quarter combination, the quarter is valid.""" - return make_validator( - lambda value: int(str(value)[-1]) > 0 and int(str(value)[-1]) < 5, - lambda eargs: f"{format_error_context(eargs)} {str(eargs.value)[-1]} is not a valid quarter.", - ) - - -def validateSSN(): - """Validate that SSN value is not a repeating digit.""" - options = [str(i) * 9 for i in range(0, 10)] - return make_validator( - lambda value: value not in options, - lambda eargs: f"{format_error_context(eargs)} {eargs.value} is in {options}." 
- ) - - -def validateRace(): - """Validate race.""" - return make_validator( - lambda value: value >= 0 and value <= 2, - lambda eargs: - f"{format_error_context(eargs)} {eargs.value} is not greater than or equal to 0 " - "or smaller than or equal to 2." - ) - - -def validateRptMonthYear(): - """Validate RPT_MONTH_YEAR.""" - return make_validator( - lambda value: value[2:8].isdigit() and int(value[2:6]) > 1900 and value[6:8] in {"01", "02", "03", "04", "05", - "06", "07", "08", "09", "10", - "11", "12"}, - lambda eargs: - f"{format_error_context(eargs)} The value: {eargs.value[2:8]}, " - "does not follow the YYYYMM format for Reporting Year and Month.", - ) - - -# outlier validators -def validate__FAM_AFF__SSN(): - """ - Validate social security number provided. - - If item FAMILY_AFFILIATION ==2 and item CITIZENSHIP_STATUS ==1 or 2, - then item SSN != 000000000 -- 999999999. - """ - # value is instance - def validate(instance, row_schema): - FAMILY_AFFILIATION = ( - instance["FAMILY_AFFILIATION"] - if type(instance) is dict - else getattr(instance, "FAMILY_AFFILIATION") - ) - CITIZENSHIP_STATUS = ( - instance["CITIZENSHIP_STATUS"] - if type(instance) is dict - else getattr(instance, "CITIZENSHIP_STATUS") - ) - SSN = instance["SSN"] if type(instance) is dict else getattr(instance, "SSN") - if FAMILY_AFFILIATION == 2 and ( - CITIZENSHIP_STATUS == 1 or CITIZENSHIP_STATUS == 2 - ): - if SSN in [str(i) * 9 for i in range(10)]: - return ( - False, - f"{row_schema.record_type}: If FAMILY_AFFILIATION ==2 and CITIZENSHIP_STATUS==1 or 2, " - "then SSN != 000000000 -- 999999999.", - ["FAMILY_AFFILIATION", "CITIZENSHIP_STATUS", "SSN"], - ) - else: - return (True, None, ["FAMILY_AFFILIATION", "CITIZENSHIP_STATUS", "SSN"]) - else: - return (True, None, ["FAMILY_AFFILIATION", "CITIZENSHIP_STATUS", "SSN"]) - - return validate - -def validate_header_section_matches_submission(datafile, section, generate_error): - """Validate header section matches submission section.""" - is_valid 
= datafile.section == section - - error = None - if not is_valid: - error = generate_error( - schema=None, - error_category=ParserErrorCategoryChoices.PRE_CHECK, - error_message=f"Data does not match the expected layout for {datafile.section}.", - record=None, - field=None, - ) - - return is_valid, error - - -def validate_tribe_fips_program_agree(program_type, tribe_code, state_fips_code, generate_error): - """Validate tribe code, fips code, and program type all agree with eachother.""" - is_valid = False - - if program_type == 'TAN' and value_is_empty(state_fips_code, 2, extra_vals={'0'*2}): - is_valid = not value_is_empty(tribe_code, 3, extra_vals={'0'*3}) - else: - is_valid = value_is_empty(tribe_code, 3, extra_vals={'0'*3}) - - error = None - if not is_valid: - error = generate_error( - schema=None, - error_category=ParserErrorCategoryChoices.PRE_CHECK, - - error_message=f"Tribe Code ({tribe_code}) inconsistency with Program Type ({program_type}) and " + - f"FIPS Code ({state_fips_code}).", - record=None, - field=None - ) - - return is_valid, error - - -def validate_header_rpt_month_year(datafile, header, generate_error): - """Validate header rpt_month_year.""" - # the header year/quarter represent a calendar period, and frontend year/qtr represents a fiscal period - header_calendar_qtr = f"Q{header['quarter']}" - header_calendar_year = header['year'] - file_calendar_year, file_calendar_qtr = fiscal_to_calendar(datafile.year, f"{datafile.quarter}") - - is_valid = file_calendar_year is not None and file_calendar_qtr is not None - is_valid = is_valid and file_calendar_year == header_calendar_year and file_calendar_qtr == header_calendar_qtr - - error = None - if not is_valid: - error = generate_error( - schema=None, - error_category=ParserErrorCategoryChoices.PRE_CHECK, - error_message=f"Submitted reporting year:{header['year']}, quarter:Q{header['quarter']} doesn't match " - + f"file reporting year:{datafile.year}, quarter:{datafile.quarter}.", - record=None, - 
field=None, - ) - return is_valid, error - - -def _is_all_zeros(value, start, end): - """Check if a value is all zeros.""" - return value[start:end] == "0" * (end - start) - - -def t3_m3_child_validator(which_child): - """T3 child validator.""" - def t3_first_child_validator_func(value, temp, friendly_name, item_num): - if not _is_empty(value, 1, 60) and len(value) >= 60: - return (True, None) - elif not len(value) >= 60: - return (False, f"The first child record is too short at {len(value)} " - "characters and must be at least 60 characters.") - else: - return (False, "The first child record is empty.") - - def t3_second_child_validator_func(value, temp, friendly_name, item_num): - if not _is_empty(value, 60, 101) and len(value) >= 101 and \ - not _is_empty(value, 8, 19) and \ - not _is_all_zeros(value, 60, 101): - return (True, None) - elif not len(value) >= 101: - return (False, f"The second child record is too short at {len(value)} " - "characters and must be at least 101 characters.") - else: - return (False, "The second child record is empty.") - - return t3_first_child_validator_func if which_child == 1 else t3_second_child_validator_func - - -def is_quiet_preparser_errors(min_length, empty_from=61, empty_to=101): - """Return a function that checks if the length is valid and if the value is empty.""" - def return_value(value): - is_length_valid = len(value) >= min_length - is_empty = value_is_empty( - value[empty_from:empty_to], - len(value[empty_from:empty_to]) - ) - return not (is_length_valid and not is_empty and not _is_all_zeros(value, empty_from, empty_to)) - return return_value - - -def validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE(): - """If WORK_ELIGIBLE_INDICATOR == 11 and AGE < 19, then RELATIONSHIP_HOH != 1.""" - # value is instance - def validate(instance, row_schema): - false_case = (False, - f"{row_schema.record_type}: If WORK_ELIGIBLE_INDICATOR == 11 and AGE < 19, " - "then RELATIONSHIP_HOH != 1", - ['WORK_ELIGIBLE_INDICATOR', 
'RELATIONSHIP_HOH', 'DATE_OF_BIRTH'] - ) - true_case = (True, - None, - ['WORK_ELIGIBLE_INDICATOR', 'RELATIONSHIP_HOH', 'DATE_OF_BIRTH'], - ) - try: - WORK_ELIGIBLE_INDICATOR = ( - instance["WORK_ELIGIBLE_INDICATOR"] - if type(instance) is dict - else getattr(instance, "WORK_ELIGIBLE_INDICATOR") - ) - RELATIONSHIP_HOH = ( - instance["RELATIONSHIP_HOH"] - if type(instance) is dict - else getattr(instance, "RELATIONSHIP_HOH") - ) - RELATIONSHIP_HOH = int(RELATIONSHIP_HOH) - - DOB = str( - instance["DATE_OF_BIRTH"] - if type(instance) is dict - else getattr(instance, "DATE_OF_BIRTH") - ) - - RPT_MONTH_YEAR = str( - instance["RPT_MONTH_YEAR"] - if type(instance) is dict - else getattr(instance, "RPT_MONTH_YEAR") - ) - - RPT_MONTH_YEAR += "01" - - DOB_datetime = datetime.datetime.strptime(DOB, '%Y%m%d') - RPT_MONTH_YEAR_datetime = datetime.datetime.strptime(RPT_MONTH_YEAR, '%Y%m%d') - AGE = (RPT_MONTH_YEAR_datetime - DOB_datetime).days / 365.25 - - if WORK_ELIGIBLE_INDICATOR == "11" and AGE < 19: - if RELATIONSHIP_HOH == 1: - return false_case - else: - return true_case - else: - return true_case - except Exception: - vals = {"WORK_ELIGIBLE_INDICATOR": WORK_ELIGIBLE_INDICATOR, - "RELATIONSHIP_HOH": RELATIONSHIP_HOH, - "DOB": DOB - } - logger.debug("Caught exception in validator: validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE. " + - f"With field values: {vals}.") - # Per conversation with Alex on 03/26/2024, returning the true case during exception handling to avoid - # confusing the STTs. 
- return true_case - - return validate diff --git a/tdrs-backend/tdpservice/parsers/validators/__init__.py b/tdrs-backend/tdpservice/parsers/validators/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tdrs-backend/tdpservice/parsers/validators/base.py b/tdrs-backend/tdpservice/parsers/validators/base.py new file mode 100644 index 000000000..1f4617ff9 --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/base.py @@ -0,0 +1,171 @@ +"""Base functions to be overloaded and composed from within the other validator classes.""" + +import functools +from .util import _is_empty + + +def _handle_cast(val, cast): + return cast(val) + + +def _handle_kwargs(val, **kwargs): + if 'cast' in kwargs and kwargs['cast'] is not None: + val = _handle_cast(val, kwargs['cast']) + + return val + + +def base_validator(makeValidator): + """Wrap validator funcs to handle kwargs.""" + @functools.wraps(makeValidator) + def _validator(*args, **kwargs): + validator = makeValidator(*args, **kwargs) + + def _validate(val): + val = _handle_kwargs(val, **kwargs) + return validator(val) + + return _validate + + return _validator + + +@base_validator +def isEqual(option, **kwargs): + """Return a function that tests if an input param is equal to option.""" + return lambda val: val == option + + +@base_validator +def isNotEqual(option, **kwargs): + """Return a function that tests if an input param is not equal to option.""" + return lambda val: val != option + + +@base_validator +def isOneOf(options, **kwargs): + """Return a function that tests if an input param is one of options.""" + def check_option(value): + # split the option if it is a range and append the range to the options + for option in options: + if "-" in str(option): + start, end = option.split("-") + options.extend([i for i in range(int(start), int(end) + 1)]) + options.remove(option) + return value in options + + return lambda val: check_option(val) + + +@base_validator +def isNotOneOf(options, **kwargs): 
+ """Return a function that tests if an input param is not one of options.""" + return lambda val: val not in options + + +@base_validator +def isGreaterThan(option, inclusive=False, **kwargs): + """Return a function that tests if an input param is greater than option.""" + return lambda val: val > option if not inclusive else val >= option + + +@base_validator +def isLessThan(option, inclusive=False, **kwargs): + """Return a function that tests if an input param is less than option.""" + return lambda val: val < option if not inclusive else val <= option + + +@base_validator +def isBetween(min, max, inclusive=False, **kwargs): + """Return a function that tests if an input param is between min and max.""" + return lambda val: min < val < max if not inclusive else min <= val <= max + + +@base_validator +def startsWith(substr, **kwargs): + """Return a function that tests if an input param starts with substr.""" + return lambda val: str(val).startswith(substr) + + +@base_validator +def contains(substr, **kwargs): + """Return a function that tests if an input param contains substr.""" + return lambda val: str(val).find(substr) != -1 + + +@base_validator +def isNumber(**kwargs): + """Return a function that tests if an input param is numeric.""" + return lambda val: str(val).strip().isnumeric() + + +@base_validator +def isAlphaNumeric(**kwargs): + """Return a function that tests if an input param is alphanumeric.""" + return lambda val: val.isalnum() + + +@base_validator +def isEmpty(start=0, end=None, **kwargs): + """Return a function that tests if an input param is empty or all fill chars.""" + return lambda val: _is_empty(val, start, end) + + +@base_validator +def isNotEmpty(start=0, end=None, **kwargs): + """Return a function that tests if an input param is not empty or all fill chars.""" + return lambda val: not _is_empty(val, start, end) + + +@base_validator +def isBlank(**kwargs): + """Return a function that tests if an input param is all space.""" + return lambda 
val: val.isspace() + + +@base_validator +def hasLength(length, **kwargs): + """Return a function that tests if an input param has length equal to length.""" + return lambda val: len(val) == length + + +@base_validator +def hasLengthGreaterThan(length, inclusive=False, **kwargs): + """Return a function that tests if an input param has length greater than length.""" + return lambda val: len(val) > length if not inclusive else len(val) >= length + + +@base_validator +def intHasLength(length, **kwargs): + """Return a function that tests if an integer input param has a number of digits equal to length.""" + return lambda val: sum(c.isdigit() for c in str(val)) == length + + +@base_validator +def isNotZero(number_of_zeros=1, **kwargs): + """Return a function that tests if an input param is zero or all zeros.""" + return lambda val: val != "0" * number_of_zeros + + +@base_validator +def dateYearIsLargerThan(year, **kwargs): + """Return a function that tests that an input date has a year value larger than the given year.""" + return lambda val: int(val) > year + + +@base_validator +def dateMonthIsValid(**kwargs): + """Return a function that tests that an input date has a month value that is valid.""" + return lambda val: int(val) in range(1, 13) + + +@base_validator +def dateDayIsValid(**kwargs): + """Return a function that tests that an input date has a day value that is valid.""" + return lambda val: int(val) in range(1, 32) + + +@base_validator +def quarterIsValid(**kwargs): + """Return a function that tests that an input date has a quarter value that is valid.""" + return lambda val: int(val) > 0 and int(val) < 5 diff --git a/tdrs-backend/tdpservice/parsers/validators/category1.py b/tdrs-backend/tdpservice/parsers/validators/category1.py new file mode 100644 index 000000000..7482a17d9 --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/category1.py @@ -0,0 +1,208 @@ +"""Overloads and custom validators for category 1 (preparsing).""" + +from 
tdpservice.parsers.models import ParserErrorCategoryChoices +from tdpservice.parsers.util import fiscal_to_calendar, year_month_to_year_quarter +from . import base +from .util import ValidationErrorArgs, make_validator, _is_all_zeros, _is_empty, value_is_empty + + +def format_error_context(eargs: ValidationErrorArgs): + """Format the error message for consistency across cat1 validators.""" + return f'{eargs.row_schema.record_type} Item {eargs.item_num} ({eargs.friendly_name}):' + + +def recordIsNotEmpty(start=0, end=None, **kwargs): + """Return a function that tests that a record/line is not empty.""" + return make_validator( + base.isNotEmpty(start, end, **kwargs), + lambda eargs: f'{format_error_context(eargs)} {str(eargs.value)} contains blanks ' + f'between positions {start} and {end if end else len(str(eargs.value))}.' + ) + + +def recordHasLength(length, **kwargs): + """Return a function that tests that a record/line has the specified length.""" + return make_validator( + base.hasLength(length, **kwargs), + lambda eargs: + f"{eargs.row_schema.record_type}: record length is {len(eargs.value)} characters but must be {length}.", + ) + + +def recordHasLengthBetween(min, max, **kwargs): + """Return a function that tests that a record/line has a length between min and max.""" + _validator = base.isBetween(min, max, inclusive=True, **kwargs) + return make_validator( + lambda record: _validator(len(record)), + lambda eargs: + f"{eargs.row_schema.record_type}: record length of {len(eargs.value)} " + f"characters is not in the range [{min}, {max}].", + ) + + +def recordStartsWith(substr, func=None, **kwargs): + """Return a function that tests that a record/line starts with a specified substr.""" + return make_validator( + base.startsWith(substr, **kwargs), + func if func else lambda eargs: f'{eargs.value} must start with {substr}.' 
+ ) + + +def caseNumberNotEmpty(start=0, end=None, **kwargs): + """Return a function that tests that a record/line is not blank between the Case Number indices.""" + return make_validator( + base.isNotEmpty(start, end, **kwargs), + lambda eargs: f'{eargs.row_schema.record_type}: Case number {str(eargs.value)} cannot contain blanks.' + ) + + +def or_priority_validators(validators=[]): + """Return a validator that is true based on a priority of validators. + + validators: ordered list of validators to be checked + """ + def or_priority_validators_func(value, eargs): + for validator in validators: + result, msg = validator(value, eargs) + if not result: + return (result, msg) + return (True, None) + + return or_priority_validators_func + + +def validate_fieldYearMonth_with_headerYearQuarter(): + """Validate that the field year and month match the header year and quarter.""" + def validate_reporting_month_year_fields_header(line, eargs): + row_schema = eargs.row_schema + field_month_year = row_schema.get_field_values_by_names( + line, ['RPT_MONTH_YEAR']).get('RPT_MONTH_YEAR') + df_quarter = row_schema.datafile.quarter + df_year = row_schema.datafile.year + + # get reporting month year from header + field_year, field_quarter = year_month_to_year_quarter(f"{field_month_year}") + file_calendar_year, file_calendar_qtr = fiscal_to_calendar(df_year, f"{df_quarter}") + + if str(file_calendar_year) == str(field_year) and file_calendar_qtr == field_quarter: + return (True, None) + + return ( + False, + f"{row_schema.record_type}: Reporting month year {field_month_year} " + + f"does not match file reporting year:{df_year}, quarter:{df_quarter}.", + ) + + return validate_reporting_month_year_fields_header + + +def validateRptMonthYear(): + """Validate RPT_MONTH_YEAR.""" + return make_validator( + lambda value: value[2:8].isdigit() and int(value[2:6]) > 1900 and value[6:8] in { + "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12" + }, + lambda eargs: + 
f"{format_error_context(eargs)} The value: {eargs.value[2:8]}, " + "does not follow the YYYYMM format for Reporting Year and Month.", + ) + + +def t3_m3_child_validator(which_child): + """T3 child validator.""" + def t3_first_child_validator_func(line, eargs): + if not _is_empty(line, 1, 60) and len(line) >= 60: + return (True, None) + elif not len(line) >= 60: + return (False, f"The first child record is too short at {len(line)} " + "characters and must be at least 60 characters.") + else: + return (False, "The first child record is empty.") + + def t3_second_child_validator_func(line, eargs): + if not _is_empty(line, 60, 101) and len(line) >= 101 and \ + not _is_empty(line, 8, 19) and \ + not _is_all_zeros(line, 60, 101): + return (True, None) + elif not len(line) >= 101: + return (False, f"The second child record is too short at {len(line)} " + "characters and must be at least 101 characters.") + else: + return (False, "The second child record is empty.") + + return t3_first_child_validator_func if which_child == 1 else t3_second_child_validator_func + + +def calendarQuarterIsValid(start=0, end=None): + """Validate that the calendar quarter value is valid.""" + return make_validator( + lambda value: value[start:end].isnumeric() and int(value[start:end - 1]) >= 2020 + and int(value[end - 1:end]) > 0 and int(value[end - 1:end]) < 5, + lambda eargs: f"{eargs.row_schema.record_type}: {eargs.value[start:end]} is invalid. 
" + "Calendar Quarter must be a numeric representing the Calendar Year and Quarter formatted as YYYYQ", + ) + + +# file pre-check validators +def validate_tribe_fips_program_agree(program_type, tribe_code, state_fips_code, generate_error): + """Validate tribe code, fips code, and program type all agree with eachother.""" + is_valid = False + + if program_type == 'TAN' and value_is_empty(state_fips_code, 2, extra_vals={'0'*2}): + is_valid = not value_is_empty(tribe_code, 3, extra_vals={'0'*3}) + else: + is_valid = value_is_empty(tribe_code, 3, extra_vals={'0'*3}) + + error = None + if not is_valid: + error = generate_error( + schema=None, + error_category=ParserErrorCategoryChoices.PRE_CHECK, + + error_message=f"Tribe Code ({tribe_code}) inconsistency with Program Type ({program_type}) and " + + f"FIPS Code ({state_fips_code}).", + record=None, + field=None + ) + + return is_valid, error + + +def validate_header_section_matches_submission(datafile, section, generate_error): + """Validate header section matches submission section.""" + is_valid = datafile.section == section + + error = None + if not is_valid: + error = generate_error( + schema=None, + error_category=ParserErrorCategoryChoices.PRE_CHECK, + error_message=f"Data does not match the expected layout for {datafile.section}.", + record=None, + field=None, + ) + + return is_valid, error + + +def validate_header_rpt_month_year(datafile, header, generate_error): + """Validate header rpt_month_year.""" + # the header year/quarter represent a calendar period, and frontend year/qtr represents a fiscal period + header_calendar_qtr = f"Q{header['quarter']}" + header_calendar_year = header['year'] + file_calendar_year, file_calendar_qtr = fiscal_to_calendar(datafile.year, f"{datafile.quarter}") + + is_valid = file_calendar_year is not None and file_calendar_qtr is not None + is_valid = is_valid and file_calendar_year == header_calendar_year and file_calendar_qtr == header_calendar_qtr + + error = None + if not 
is_valid: + error = generate_error( + schema=None, + error_category=ParserErrorCategoryChoices.PRE_CHECK, + error_message=f"Submitted reporting year:{header['year']}, quarter:Q{header['quarter']} doesn't match " + + f"file reporting year:{datafile.year}, quarter:{datafile.quarter}.", + record=None, + field=None, + ) + return is_valid, error diff --git a/tdrs-backend/tdpservice/parsers/validators/category2.py b/tdrs-backend/tdpservice/parsers/validators/category2.py new file mode 100644 index 000000000..2867b0c18 --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/category2.py @@ -0,0 +1,191 @@ +"""Overloaded base validators and custom validators for category 2 validation (field validation).""" + +from tdpservice.parsers.util import clean_options_string +from . import base +from .util import ValidationErrorArgs, validator, make_validator + + +def format_error_context(eargs: ValidationErrorArgs): + """Format the error message for consistency across cat2 validators.""" + return f'{eargs.row_schema.record_type} Item {eargs.item_num} ({eargs.friendly_name}):' + + +@validator(base.isEqual) +def isEqual(option, **kwargs): + """Return a custom message for the isEqual validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} does not match {option}." + + +@validator(base.isNotEqual) +def isNotEqual(option, **kwargs): + """Return a custom message for the isNotEqual validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} matches {option}." + + +@validator(base.isOneOf) +def isOneOf(options, **kwargs): + """Return a custom message for the isOneOf validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not in {clean_options_string(options)}." + + +@validator(base.isNotOneOf) +def isNotOneOf(options, **kwargs): + """Return a custom message for the isNotOneOf validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is in {clean_options_string(options)}." 
+ + +@validator(base.isGreaterThan) +def isGreaterThan(option, inclusive=False, **kwargs): + """Return a custom message for the isGreaterThan validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not larger than {option}." + + +@validator(base.isLessThan) +def isLessThan(option, inclusive=False, **kwargs): + """Return a custom message for the isLessThan validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not smaller than {option}." + + +@validator(base.isBetween) +def isBetween(min, max, inclusive=False, **kwargs): + """Return a custom message for the isBetween validator.""" + def inclusive_err(eargs): + return f"{format_error_context(eargs)} {eargs.value} is not in range [{min}, {max}]." + + def exclusive_err(eargs): + return f"{format_error_context(eargs)} {eargs.value} is not between {min} and {max}." + + return inclusive_err if inclusive else exclusive_err + + +@validator(base.startsWith) +def startsWith(substr, **kwargs): + """Return a custom message for the startsWith validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} does not start with {substr}." + + +@validator(base.contains) +def contains(substr, **kwargs): + """Return a custom message for the contains validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} does not contain {substr}." + + +@validator(base.isNumber) +def isNumber(**kwargs): + """Return a custom message for the isNumber validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not a number." + + +@validator(base.isAlphaNumeric) +def isAlphaNumeric(**kwargs): + """Return a custom message for the isAlphaNumeric validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not alphanumeric." 
+ + +@validator(base.isEmpty) +def isEmpty(start=0, end=None, **kwargs): + """Return a custom message for the isEmpty validator.""" + return lambda eargs: ( + f'{format_error_context(eargs)} {eargs.value} is not blank ' + f'between positions {start} and {end if end else len(eargs.value)}.' + ) + + +@validator(base.isNotEmpty) +def isNotEmpty(start=0, end=None, **kwargs): + """Return a custom message for the isNotEmpty validator.""" + return lambda eargs: ( + f'{format_error_context(eargs)} {str(eargs.value)} contains blanks ' + f'between positions {start} and {end if end else len(str(eargs.value))}.' + ) + + +@validator(base.isBlank) +def isBlank(**kwargs): + """Return a custom message for the isBlank validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is not blank." + + +@validator(base.hasLength) +def hasLength(length, **kwargs): + """Return a custom message for the hasLength validator.""" + return lambda eargs: ( + f"{format_error_context(eargs)} field length " + f"is {len(eargs.value)} characters but must be {length}." + ) + + +@validator(base.hasLengthGreaterThan) +def hasLengthGreaterThan(length, inclusive=False, **kwargs): + """Return a custom message for the hasLengthGreaterThan validator.""" + return lambda eargs: ( + f"{format_error_context(eargs)} Value length {len(eargs.value)} is not greater than {length}." + ) + + +@validator(base.intHasLength) +def intHasLength(length, **kwargs): + """Return a custom message for the intHasLength validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} does not have exactly {length} digits." + + +@validator(base.isNotZero) +def isNotZero(number_of_zeros=1, **kwargs): + """Return a custom message for the isNotZero validator.""" + return lambda eargs: f"{format_error_context(eargs)} {eargs.value} is zero." 
+ + +# custom validators, written using the previous validator functions +def dateYearIsLargerThan(year, **kwargs): + """Validate that in a monthyear combination, the year is larger than the given year.""" + _validator = base.dateYearIsLargerThan(year, **kwargs) + return make_validator( + lambda value: _validator(int(str(value)[:4])), + lambda eargs: f"{format_error_context(eargs)} Year {str(eargs.value)[:4]} must be larger than {year}.", + ) + + +def dateMonthIsValid(**kwargs): + """Validate that in a monthyear combination, the month is a valid month.""" + _validator = base.dateMonthIsValid(**kwargs) + return make_validator( + lambda val: _validator(int(str(val)[4:6])), + lambda eargs: f"{format_error_context(eargs)} {str(eargs.value)[4:6]} is not a valid month.", + ) + + +def dateDayIsValid(**kwargs): + """Validate that in a monthyearday combination, the day is a valid day.""" + _validator = base.dateDayIsValid(**kwargs) + return make_validator( + lambda value: _validator(int(str(value)[6:])), + lambda eargs: f"{format_error_context(eargs)} {str(eargs.value)[6:]} is not a valid day.", + ) + + +def quarterIsValid(**kwargs): + """Validate in a year quarter combination, the quarter is valid.""" + _validator = base.quarterIsValid(**kwargs) + return make_validator( + lambda value: _validator(int(str(value)[-1])), + lambda eargs: f"{format_error_context(eargs)} {str(eargs.value)[-1]} is not a valid quarter.", + ) + + +def validateRace(): + """Validate race.""" + return make_validator( + base.isBetween(0, 2, inclusive=True), + lambda eargs: + f"{format_error_context(eargs)} {eargs.value} is not in range [0, 2]." + ) + + +def validateHeaderUpdateIndicator(): + """Validate the header update indicator.""" + return make_validator( + base.isEqual('D'), + lambda eargs: + f"HEADER Update Indicator must be set to D instead of {eargs.value}. " + "Please review Exporting Complete Data Using FTANF in the Knowledge Center." 
+ ) diff --git a/tdrs-backend/tdpservice/parsers/validators/category3.py b/tdrs-backend/tdpservice/parsers/validators/category3.py new file mode 100644 index 000000000..cb278e5e2 --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/category3.py @@ -0,0 +1,394 @@ +"""Overloaded base validators and custom postparsing validators.""" + +import datetime +import logging +from tdpservice.parsers.util import get_record_value_by_field_name +from . import base +from .util import ValidationErrorArgs, validator, make_validator, evaluate_all + +logger = logging.getLogger(__name__) + + +def format_error_context(eargs: ValidationErrorArgs): + """Format the error message for consistency across cat3 validators.""" + return f'Item {eargs.item_num} ({eargs.friendly_name})' + + +@validator(base.isEqual) +def isEqual(option, **kwargs): + """Return a custom message for the isEqual validator.""" + return lambda eargs: f'must match {option}' + + +@validator(base.isNotEqual) +def isNotEqual(option, **kwargs): + """Return a custom message for the isNotEqual validator.""" + return lambda eargs: f'must not be equal to {option}' + + +@validator(base.isOneOf) +def isOneOf(options, **kwargs): + """Return a custom message for the isOneOf validator.""" + return lambda eargs: f'must be one of {options}' + + +@validator(base.isNotOneOf) +def isNotOneOf(options, **kwargs): + """Return a custom message for the isNotOneOf validator.""" + return lambda eargs: f'must not be one of {options}' + + +@validator(base.isGreaterThan) +def isGreaterThan(option, inclusive=False, **kwargs): + """Return a custom message for the isGreaterThan validator.""" + return lambda eargs: f'must be greater than {option}' + + +@validator(base.isLessThan) +def isLessThan(option, inclusive=False, **kwargs): + """Return a custom message for the isLessThan validator.""" + return lambda eargs: f'must be less than {option}' + + +@validator(base.isBetween) +def isBetween(min, max, inclusive=False, **kwargs): + """Return a 
custom message for the isBetween validator.""" + return lambda eargs: f'must be between {min} and {max}' + + +@validator(base.startsWith) +def startsWith(substr, **kwargs): + """Return a custom message for the startsWith validator.""" + return lambda eargs: f'must start with {substr}' + + +@validator(base.contains) +def contains(substr, **kwargs): + """Return a custom message for the contains validator.""" + return lambda eargs: f'must contain {substr}' + + +@validator(base.isNumber) +def isNumber(**kwargs): + """Return a custom message for the isNumber validator.""" + return lambda eargs: 'must be a number' + + +@validator(base.isAlphaNumeric) +def isAlphaNumeric(**kwargs): + """Return a custom message for the isAlphaNumeric validator.""" + return lambda eargs: 'must be alphanumeric' + + +@validator(base.isEmpty) +def isEmpty(start=0, end=None, **kwargs): + """Return a custom message for the isEmpty validator.""" + return lambda eargs: 'must be empty' + + +@validator(base.isNotEmpty) +def isNotEmpty(start=0, end=None, **kwargs): + """Return a custom message for the isNotEmpty validator.""" + return lambda eargs: 'must not be empty' + + +@validator(base.isBlank) +def isBlank(**kwargs): + """Return a custom message for the isBlank validator.""" + return lambda eargs: 'must be blank' + + +@validator(base.hasLength) +def hasLength(length, **kwargs): + """Return a custom message for the hasLength validator.""" + return lambda eargs: f'must have length {length}' + + +@validator(base.hasLengthGreaterThan) +def hasLengthGreaterThan(length, inclusive=False, **kwargs): + """Return a custom message for the hasLengthGreaterThan validator.""" + return lambda eargs: f'must have length greater than {length}' + + +@validator(base.intHasLength) +def intHasLength(length, **kwargs): + """Return a custom message for the intHasLength validator.""" + return lambda eargs: f'must have length {length}' + + +@validator(base.isNotZero) +def isNotZero(number_of_zeros=1, **kwargs): + 
"""Return a custom message for the isNotZero validator.""" + return lambda eargs: 'must not be zero' + + +def isOlderThan(min_age): + """Validate that value is larger than min_age.""" + def _validate(val): + birth_year = int(str(val)[:4]) + age = datetime.date.today().year - birth_year + _validator = base.isGreaterThan(min_age) + result = _validator(age) + return result + + return make_validator( + _validate, + lambda eargs: + f"{str(eargs.value)[:4]} must be less " + f"than or equal to {datetime.date.today().year - min_age} to meet the minimum age requirement." + ) + + +def validateSSN(): + """Validate that SSN value is not a repeating digit.""" + options = [str(i) * 9 for i in range(0, 10)] + return make_validator( + base.isNotOneOf(options), + lambda eargs: f"is in {options}." + ) + + +# compositional validators, build an error message using multiple of the above functions +def ifThenAlso(condition_field_name, condition_function, result_field_name, result_function, **kwargs): + """Return second validation if the first validator is true. 
+ +    :param condition_field_name: name of the field whose value gates whether the result check runs +    :param condition_function: validator called as (value, eargs), returns (bool, string) validation state +    :param result_field_name: name of the field that is validated when the condition holds +    :param result_function: validator called as (value, eargs), returns (bool, string) validation state + """ + def if_then_validator_func(record, row_schema): + condition_value = get_record_value_by_field_name(record, condition_field_name) + condition_field = row_schema.get_field_by_name(condition_field_name) + condition_field_eargs = ValidationErrorArgs( + value=condition_value, + row_schema=row_schema, + friendly_name=condition_field.friendly_name, + item_num=condition_field.item, + ) + condition_success, msg1 = condition_function(condition_value, condition_field_eargs) + + result_value = get_record_value_by_field_name(record, result_field_name) + result_field = row_schema.get_field_by_name(result_field_name) + result_field_eargs = ValidationErrorArgs( + value=result_value, + row_schema=row_schema, + friendly_name=result_field.friendly_name, + item_num=result_field.item, + ) + result_success, msg2 = result_function(result_value, result_field_eargs) + + if not condition_success: + return (True, None, [result_field_name, condition_field_name]) # order is important + elif not result_success: + center_error = None + if condition_success: + center_error = f'{format_error_context(condition_field_eargs)} is {condition_value}' + else: + center_error = msg1 + error_message = ( + f"Since {center_error}, then {format_error_context(result_field_eargs)} " + f"{result_value} {msg2}" + ) + + return (result_success, error_message, [condition_field_name, result_field_name]) + else: + return (result_success, None, [condition_field_name, result_field_name]) + + return if_then_validator_func + + +def orValidators(validators, **kwargs): + """Return a validator that is true only if one of the validators is true.""" + 
is_if_result_func = kwargs.get('if_result', False) + + def _validate(value, eargs): + validator_results = evaluate_all(validators, value, eargs) + + if not any(result[0] for result in validator_results): + error_msg = f'{format_error_context(eargs)} {value} ' if not is_if_result_func else '' + error_msg += " or ".join([result[1] for result in validator_results]) + '.' + return (False, error_msg) + + return (True, None) + + return _validate + + +# custom validators +def sumIsEqual(condition_field_name, sum_fields=[]): + """Validate that the sum of the sum_fields equals the condition_field.""" + def sumIsEqualFunc(record, row_schema): + sum = 0 + for field in sum_fields: + val = get_record_value_by_field_name(record, field) + sum += 0 if val is None else val + + condition_val = get_record_value_by_field_name(record, condition_field_name) + condition_field = row_schema.get_field_by_name(condition_field_name) + fields = [condition_field_name] + fields.extend(sum_fields) + + if sum == condition_val: + return (True, None, fields) + return ( + False, + f"{row_schema.record_type}: The sum of {sum_fields} does not equal {condition_field_name} " + f"{condition_field.friendly_name} Item {condition_field.item}.", + fields + ) + + return sumIsEqualFunc + + +def sumIsLarger(fields, val): + """Validate that the sum of the fields is larger than val.""" + def sumIsLargerFunc(record, row_schema): + sum = 0 + for field in fields: + temp_val = get_record_value_by_field_name(record, field) + sum += 0 if temp_val is None else temp_val + + if sum > val: + return (True, None, fields) + + return ( + False, + f"{row_schema.record_type}: The sum of {fields} is not larger than {val}.", + fields, + ) + + return sumIsLargerFunc + + +def validate__FAM_AFF__SSN(): + """ + Validate social security number provided. + + Since item FAMILY_AFFILIATION ==2 and item CITIZENSHIP_STATUS ==1 or 2, + then item SSN != 000000000 -- 999999999. 
+ """ + # value is instance + def validate(record, row_schema): + fam_affil_field = row_schema.get_field_by_name('FAMILY_AFFILIATION') + FAMILY_AFFILIATION = get_record_value_by_field_name(record, 'FAMILY_AFFILIATION') + fam_affil_eargs = ValidationErrorArgs( + value=FAMILY_AFFILIATION, + row_schema=row_schema, + friendly_name=fam_affil_field.friendly_name, + item_num=fam_affil_field.item, + ) + + cit_stat_field = row_schema.get_field_by_name('CITIZENSHIP_STATUS') + CITIZENSHIP_STATUS = get_record_value_by_field_name(record, 'CITIZENSHIP_STATUS') + cit_stat_eargs = ValidationErrorArgs( + value=CITIZENSHIP_STATUS, + row_schema=row_schema, + friendly_name=cit_stat_field.friendly_name, + item_num=cit_stat_field.item, + ) + + ssn_field = row_schema.get_field_by_name('SSN') + SSN = get_record_value_by_field_name(record, 'SSN') + ssn_eargs = ValidationErrorArgs( + value=SSN, + row_schema=row_schema, + friendly_name=ssn_field.friendly_name, + item_num=ssn_field.item, + ) + + if FAMILY_AFFILIATION == 2 and ( + CITIZENSHIP_STATUS == 1 or CITIZENSHIP_STATUS == 2 + ): + if SSN in [str(i) * 9 for i in range(10)]: + return ( + False, + f"{row_schema.record_type}: Since {format_error_context(fam_affil_eargs)} is 2 " + f"and {format_error_context(cit_stat_eargs)} is 1 or 2, " + f"then {format_error_context(ssn_eargs)} must not be in 000000000 -- 999999999.", + ["FAMILY_AFFILIATION", "CITIZENSHIP_STATUS", "SSN"], + ) + else: + return (True, None, ["FAMILY_AFFILIATION", "CITIZENSHIP_STATUS", "SSN"]) + else: + return (True, None, ["FAMILY_AFFILIATION", "CITIZENSHIP_STATUS", "SSN"]) + + return validate + + +def validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE(): + """If WORK_ELIGIBLE_INDICATOR == 11 and AGE < 19, then RELATIONSHIP_HOH != 1.""" + # value is instance + def validate(record, row_schema): + work_elig_field = row_schema.get_field_by_name('WORK_ELIGIBLE_INDICATOR') + work_elig_eargs = ValidationErrorArgs( + value=None, + row_schema=row_schema, + 
friendly_name=work_elig_field.friendly_name, + item_num=work_elig_field.item, + ) + + relat_hoh_field = row_schema.get_field_by_name('RELATIONSHIP_HOH') + relat_hoh_eargs = ValidationErrorArgs( + value=None, + row_schema=row_schema, + friendly_name=relat_hoh_field.friendly_name, + item_num=relat_hoh_field.item, + ) + + dob_field = row_schema.get_field_by_name('DATE_OF_BIRTH') + age_eargs = ValidationErrorArgs( + value=None, + row_schema=row_schema, + friendly_name='Age', + item_num=dob_field.item, + ) + + false_case = ( + False, + f"{row_schema.record_type}: Since {format_error_context(work_elig_eargs)} is 11 " + f"and {format_error_context(age_eargs)} is less than 19, " + f"then {format_error_context(relat_hoh_eargs)} must not be 1.", + ['WORK_ELIGIBLE_INDICATOR', 'RELATIONSHIP_HOH', 'DATE_OF_BIRTH'] + ) + true_case = ( + True, + None, + ['WORK_ELIGIBLE_INDICATOR', 'RELATIONSHIP_HOH', 'DATE_OF_BIRTH'], + ) + try: + WORK_ELIGIBLE_INDICATOR = get_record_value_by_field_name(record, 'WORK_ELIGIBLE_INDICATOR') + RELATIONSHIP_HOH = int(get_record_value_by_field_name(record, 'RELATIONSHIP_HOH')) + DOB = get_record_value_by_field_name(record, 'DATE_OF_BIRTH') + RPT_MONTH_YEAR = get_record_value_by_field_name(record, 'RPT_MONTH_YEAR') + RPT_MONTH_YEAR += "01" + + DOB_datetime = datetime.datetime.strptime(DOB, '%Y%m%d') + RPT_MONTH_YEAR_datetime = datetime.datetime.strptime(RPT_MONTH_YEAR, '%Y%m%d') + + # age computation should use generic + AGE = (RPT_MONTH_YEAR_datetime - DOB_datetime).days / 365.25 + + if WORK_ELIGIBLE_INDICATOR == "11" and AGE < 19: + if RELATIONSHIP_HOH == 1: + return false_case + else: + return true_case + else: + return true_case + except Exception as e: + vals = { + "WORK_ELIGIBLE_INDICATOR": WORK_ELIGIBLE_INDICATOR, + "RELATIONSHIP_HOH": RELATIONSHIP_HOH, + "DOB": DOB + } + logger.debug( + "Caught exception in validator: validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE. " + + f"With field values: {vals}." 
+ ) + logger.error(f'Exception: {e}') + # Per conversation with Alex on 03/26/2024, returning the true case during exception handling to avoid + # confusing the STTs. + return true_case + + return validate diff --git a/tdrs-backend/tdpservice/parsers/validators/test/__init__.py b/tdrs-backend/tdpservice/parsers/validators/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tdrs-backend/tdpservice/parsers/validators/test/test_base.py b/tdrs-backend/tdpservice/parsers/validators/test/test_base.py new file mode 100644 index 000000000..8496e762c --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/test/test_base.py @@ -0,0 +1,285 @@ +"""Test base validators.""" + + +import pytest +from .. import base + + +@pytest.mark.parametrize('val, option, kwargs, expected', [ + (1, 1, {}, True), + (1, 2, {}, False), + (True, True, {}, True), + (True, False, {}, False), + (False, False, {}, True), + (1, True, {'cast': bool}, True), + (0, True, {'cast': bool}, False), + ('1', '1', {}, True), + ('abc', 'abc', {}, True), + ('abc', 'ABC', {}, False), + ('abc', 'xyz', {}, False), + ('123', '123', {}, True), + ('123', '321', {}, False), + ('123', 123, {'cast': int}, True), + ('123', '123', {'cast': int}, False), + (123, '123', {'cast': str}, True), + (123, '123', {'cast': bool}, False), +]) +def test_isEqual(val, kwargs, option, expected): + """Test isEqual validator.""" + _validator = base.isEqual(option, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, option, kwargs, expected', [ + (1, 1, {}, False), + (1, 2, {}, True), + (True, True, {}, False), + (True, False, {}, True), + (False, False, {}, False), + (1, True, {'cast': bool}, False), + (0, True, {'cast': bool}, True), + ('1', '1', {}, False), + ('abc', 'abc', {}, False), + ('abc', 'ABC', {}, True), + ('abc', 'xyz', {}, True), + ('123', '123', {}, False), + ('123', '321', {}, True), + ('123', 123, {'cast': int}, False), + ('123', '123', {'cast': int}, True), + 
(123, '123', {'cast': str}, False), + (123, '123', {'cast': bool}, True), +]) +def test_isNotEqual(val, option, kwargs, expected): + """Test isNotEqual validator.""" + _validator = base.isNotEqual(option, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, options, kwargs, expected', [ + (1, [1, 2, 3], {}, True), + (1, ['1', '2', '3'], {}, False), + (1, ['1', '2', '3'], {'cast': str}, True), + ('1', ['1', '2', '3'], {}, True), + ('1', [1, 2, 3], {}, False), + ('1', [1, 2, 3], {'cast': int}, True), +]) +def test_isOneOf(val, options, kwargs, expected): + """Test isOneOf validator.""" + _validator = base.isOneOf(options, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, options, kwargs, expected', [ + (1, [1, 2, 3], {}, False), + (1, ['1', '2', '3'], {}, True), + (1, ['1', '2', '3'], {'cast': str}, False), + ('1', ['1', '2', '3'], {}, False), + ('1', [1, 2, 3], {}, True), + ('1', [1, 2, 3], {'cast': int}, False), +]) +def test_isNotOneOf(val, options, kwargs, expected): + """Test isNotOneOf validator.""" + _validator = base.isNotOneOf(options, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, option, inclusive, kwargs, expected', [ + (1, 0, False, {}, True), + (1, 1, False, {}, False), + (1, 1, True, {}, True), + ('1', 0, False, {'cast': int}, True), + ('30', '40', False, {}, False), +]) +def test_isGreaterThan(val, option, inclusive, kwargs, expected): + """Test isGreaterThan validator.""" + _validator = base.isGreaterThan(option, inclusive, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, option, inclusive, kwargs, expected', [ + (1, 0, False, {}, False), + (1, 1, False, {}, False), + (1, 1, True, {}, True), + ('1', 0, False, {'cast': int}, False), + ('30', '40', False, {}, True), +]) +def test_isLessThan(val, option, inclusive, kwargs, expected): + """Test isLessThan validator.""" + _validator = base.isLessThan(option, inclusive, 
**kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, min, max, inclusive, kwargs, expected', [ + (10, 1, 20, False, {}, True), + (1, 1, 20, False, {}, False), + (20, 1, 20, False, {}, False), + (20, 1, 20, True, {}, True), + ('20', 1, 20, False, {'cast': int}, False), +]) +def test_isBetween(val, min, max, inclusive, kwargs, expected): + """Test isBetween validator.""" + _validator = base.isBetween(min, max, inclusive, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, substr, kwargs, expected', [ + ('abcdefg', 'abc', {}, True), + ('abcdefg', 'xyz', {}, False), + (12345, '12', {}, True), # don't need 'cast' +]) +def test_startsWith(val, substr, kwargs, expected): + """Test startsWith validator.""" + _validator = base.startsWith(substr, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, substr, kwargs, expected', [ + ('abcdefg', 'abc', {}, True), + ('abcdefg', 'efg', {}, True), + ('abcdefg', 'cd', {}, True), + ('abcdefg', 'cf', {}, False), + (10001, '10', {}, True), # don't need 'cast' +]) +def test_contains(val, substr, kwargs, expected): + """Test contains validator.""" + _validator = base.contains(substr, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, kwargs, expected', [ + (1, {}, True), + (10, {}, True), + ('abc', {}, False), + ('123', {}, True), # don't need 'cast' + ('123abc', {}, False), +]) +def test_isNumber(val, kwargs, expected): + """Test isNumber validator.""" + _validator = base.isNumber(**kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, kwargs, expected', [ + ('abcdefg', {}, True), + ('abc123', {}, True), + ('abc123!', {}, False), + ('abc==6', {}, False), + (10, {'cast': str}, True), +]) +def test_isAlphaNumeric(val, kwargs, expected): + """Test isAlphaNumeric validator.""" + _validator = base.isAlphaNumeric(**kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, 
start, end, kwargs, expected', [ + ('1000', 0, 4, {}, False), + ('1000', 1, 4, {}, False), + ('', 0, 1, {}, True), + ('', 1, 4, {}, True), + # (None, 0, 0, {}, True), # this strangely fails.... investigate + (None, 0, 10, {}, True), + (' ', 0, 4, {}, True), + ('####', 0, 4, {}, True), + ('1###', 1, 4, {}, True), + (' 1', 0, 3, {}, True), + (' 1', 0, 4, {}, False), +]) +def test_isEmpty(val, start, end, kwargs, expected): + """Test isEmpty validator.""" + _validator = base.isEmpty(start, end, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, start, end, kwargs, expected', [ + ('1000', 0, 4, {}, True), + ('1000', 1, 4, {}, True), + ('', 0, 1, {}, False), + ('', 1, 4, {}, False), + # (None, 0, 0, {}, False), # this strangely fails.... investigate + (None, 0, 10, {}, False), + (' ', 0, 4, {}, False), + ('####', 0, 4, {}, False), + ('1###', 1, 4, {}, False), + (' 1', 0, 3, {}, False), + (' 1', 0, 4, {}, True), +]) +def test_isNotEmpty(val, start, end, kwargs, expected): + """Test isNotEmpty validator.""" + _validator = base.isNotEmpty(start, end, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, kwargs, expected', [ + (' ', {}, True), + ('1000', {}, False), + ('0000', {}, False), + ('####', {}, False), + ('----', {}, False), + ('', {}, False), +]) +def test_isBlank(val, kwargs, expected): + """Test isBlank validator.""" + _validator = base.isBlank(**kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, length, kwargs, expected', [ + ('12345', 5, {}, True), + ('123456', 5, {}, False), + ([1, 2, 3], 5, {}, False), + ([1, 2, 3], 3, {}, True), +]) +def test_hasLength(val, length, kwargs, expected): + """Test hasLength validator.""" + _validator = base.hasLength(length, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, length, inclusive, kwargs, expected', [ + ('12345', 3, False, {}, True), + ('12345', 5, False, {}, False), + ('12345', 5, True, {}, 
True), + ([1, 2, 3], 5, False, {}, False), + ([1, 2, 3], 3, False, {}, False), + ([1, 2, 3], 3, True, {}, True), + ([1, 2, 3], 1, False, {}, True), +]) +def test_hasLengthGreaterThan(val, length, inclusive, kwargs, expected): + """Test hasLengthGreaterThan validator.""" + _validator = base.hasLengthGreaterThan(length, inclusive, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, length, kwargs, expected', [ + (1001, 5, {}, False), + (1001, 4, {}, True), + (1001, 3, {}, False), + (321, 5, {}, False), + (321, 3, {}, True), + (321, 2, {}, False), + (1000, 3, {}, False), + ('0001', 3, {}, False), + ('0001', 4, {}, True), + ('1000', 3, {}, False), + ('1000', 4, {}, True), +]) +def test_intHasLength(val, length, kwargs, expected): + """Test intHasLength validator.""" + _validator = base.intHasLength(length, **kwargs) + assert _validator(val) == expected + + +@pytest.mark.parametrize('val, number_of_zeros, kwargs, expected', [ + ('000', 3, {}, False), + ('0 0', 3, {}, True), + ('100', 3, {}, True), + ('123', 3, {}, True), + ('000', 4, {}, True), + (000, 3, {'cast': str}, True), + (000, 1, {'cast': str}, False), +]) +def test_isNotZero(val, number_of_zeros, kwargs, expected): + """Test isNotZero validator.""" + _validator = base.isNotZero(number_of_zeros, **kwargs) + assert _validator(val) == expected diff --git a/tdrs-backend/tdpservice/parsers/validators/test/test_category1.py b/tdrs-backend/tdpservice/parsers/validators/test/test_category1.py new file mode 100644 index 000000000..dbf2b08cc --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/test/test_category1.py @@ -0,0 +1,90 @@ +"""Test category1 validators.""" + + +import pytest +from .. 
import category1 +from ..util import ValidationErrorArgs +from ...row_schema import RowSchema + +test_schema = RowSchema( + record_type="Test", + document=None, + preparsing_validators=[], + postparsing_validators=[], + fields=[], +) + + +def _make_eargs(line): + return ValidationErrorArgs( + value=line, + row_schema=test_schema, + friendly_name='test field', + item_num='1' + ) + + +def _validate_and_assert(validator, line, exp_result, exp_message): + result, msg = validator(line, _make_eargs(line)) + assert result == exp_result + assert msg == exp_message + + +@pytest.mark.parametrize('line, kwargs, exp_result, exp_message', [ + ('asdfasdf', {}, True, None), + ('00000000', {}, True, None), + ('########', {}, False, 'Test Item 1 (test field): ######## contains blanks between positions 0 and 8.'), + (' ', {}, False, 'Test Item 1 (test field): contains blanks between positions 0 and 8.'), +]) +def test_recordIsNotEmpty(line, kwargs, exp_result, exp_message): + """Test recordIsNotEmpty error messages.""" + _validator = category1.recordIsNotEmpty(**kwargs) + _validate_and_assert(_validator, line, exp_result, exp_message) + + +@pytest.mark.parametrize('line, length, kwargs, exp_result, exp_message', [ + ('1234', 4, {}, True, None), + ('12345', 4, {}, False, 'Test: record length is 5 characters but must be 4.'), + ('123', 4, {}, False, 'Test: record length is 3 characters but must be 4.'), +]) +def test_recordHasLength(line, length, kwargs, exp_result, exp_message): + """Test recordHasLength error messages.""" + _validator = category1.recordHasLength(length, **kwargs) + _validate_and_assert(_validator, line, exp_result, exp_message) + + +@pytest.mark.parametrize('line, min, max, kwargs, exp_result, exp_message', [ + ('1234', 2, 6, {}, True, None), + ('1234', 2, 4, {}, True, None), + ('1234', 4, 6, {}, True, None), + ('1234', 1, 2, {}, False, 'Test: record length of 4 characters is not in the range [1, 2].'), + ('1234', 6, 8, {}, False, 'Test: record length of 4 
characters is not in the range [6, 8].'), +]) +def test_recordHasLengthBetween(line, min, max, kwargs, exp_result, exp_message): + """Test recordHasLengthBetween error messages.""" + _validator = category1.recordHasLengthBetween(min, max, **kwargs) + _validate_and_assert(_validator, line, exp_result, exp_message) + + +@pytest.mark.parametrize('line, substr, kwargs, exp_result, exp_message', [ + ('12345', '12', {}, True, None), + ('ABC123', 'ABC', {}, True, None), + ('ABC123', 'abc', {}, False, 'ABC123 must start with abc.'), + ('12345', 'abc', {}, False, '12345 must start with abc.'), +]) +def test_recordStartsWith(line, substr, kwargs, exp_result, exp_message): + """Test recordStartsWith error messages.""" + _validator = category1.recordStartsWith(substr, **kwargs) + _validate_and_assert(_validator, line, exp_result, exp_message) + + +@pytest.mark.parametrize('line, start, end, kwargs, exp_result, exp_message', [ + ('1234', 1, 3, {}, True, None), + ('1004', 1, 3, {}, True, None), + ('1 4', 1, 3, {}, False, 'Test: Case number 1 4 cannot contain blanks.'), + ('1##4', 1, 3, {}, False, 'Test: Case number 1##4 cannot contain blanks.'), +]) +def test_caseNumberNotEmpty(line, start, end, kwargs, exp_result, exp_message): + """Test caseNumberNotEmpty error messages.""" + _validator = category1.caseNumberNotEmpty(start, end, **kwargs) + _validate_and_assert(_validator, line, exp_result, exp_message) diff --git a/tdrs-backend/tdpservice/parsers/validators/test/test_category2.py b/tdrs-backend/tdpservice/parsers/validators/test/test_category2.py new file mode 100644 index 000000000..653d1434e --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/test/test_category2.py @@ -0,0 +1,264 @@ +"""Test category2 validators.""" + + +import pytest +from .. 
import category2 +from ..util import ValidationErrorArgs +from ...row_schema import RowSchema + + +test_schema = RowSchema( + record_type="Test", + document=None, + preparsing_validators=[], + postparsing_validators=[], + fields=[], +) + + +def _make_eargs(val): + return ValidationErrorArgs( + value=val, + row_schema=test_schema, + friendly_name='test field', + item_num='1' + ) + + +def _validate_and_assert(validator, val, exp_result, exp_message): + result, msg = validator(val, _make_eargs(val)) + assert result == exp_result + assert msg == exp_message + + +@pytest.mark.parametrize('val, option, kwargs, exp_result, exp_message', [ + (10, 10, {}, True, None), + (1, 10, {}, False, 'Test Item 1 (test field): 1 does not match 10.'), +]) +def test_isEqual(val, option, kwargs, exp_result, exp_message): + """Test isEqual validator error messages.""" + _validator = category2.isEqual(option, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, option, kwargs, exp_result, exp_message', [ + (1, 10, {}, True, None), + (10, 10, {}, False, 'Test Item 1 (test field): 10 matches 10.'), +]) +def test_isNotEqual(val, option, kwargs, exp_result, exp_message): + """Test isNotEqual validator error messages.""" + _validator = category2.isNotEqual(option, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, options, kwargs, exp_result, exp_message', [ + (1, [1, 2, 3], {}, True, None), + (1, [4, 5, 6], {}, False, 'Test Item 1 (test field): 1 is not in [4, 5, 6].'), +]) +def test_isOneOf(val, options, kwargs, exp_result, exp_message): + """Test isOneOf validator error messages.""" + _validator = category2.isOneOf(options, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, options, kwargs, exp_result, exp_message', [ + (1, [4, 5, 6], {}, True, None), + (1, [1, 2, 3], {}, False, 'Test Item 1 (test field): 1 is in [1, 
2, 3].'), +]) +def test_isNotOneOf(val, options, kwargs, exp_result, exp_message): + """Test isNotOneOf validator error messages.""" + _validator = category2.isNotOneOf(options, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, option, inclusive, kwargs, exp_result, exp_message', [ + (10, 5, True, {}, True, None), + (10, 20, True, {}, False, 'Test Item 1 (test field): 10 is not larger than 20.'), + (10, 10, False, {}, False, 'Test Item 1 (test field): 10 is not larger than 10.'), +]) +def test_isGreaterThan(val, option, inclusive, kwargs, exp_result, exp_message): + """Test isGreaterThan validator error messages.""" + _validator = category2.isGreaterThan(option, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, option, inclusive, kwargs, exp_result, exp_message', [ + (5, 10, True, {}, True, None), + (5, 3, True, {}, False, 'Test Item 1 (test field): 5 is not smaller than 3.'), + (5, 5, False, {}, False, 'Test Item 1 (test field): 5 is not smaller than 5.'), +]) +def test_isLessThan(val, option, inclusive, kwargs, exp_result, exp_message): + """Test isLessThan validator error messages.""" + _validator = category2.isLessThan(option, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, min, max, inclusive, kwargs, exp_result, exp_message', [ + (5, 1, 10, True, {}, True, None), + (20, 1, 10, True, {}, False, 'Test Item 1 (test field): 20 is not in range [1, 10].'), + (5, 1, 10, False, {}, True, None), + (20, 1, 10, False, {}, False, 'Test Item 1 (test field): 20 is not between 1 and 10.'), +]) +def test_isBetween(val, min, max, inclusive, kwargs, exp_result, exp_message): + """Test isBetween validator error messages.""" + _validator = category2.isBetween(min, max, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + 
+@pytest.mark.parametrize('val, substr, kwargs, exp_result, exp_message', [ + ('abcdef', 'abc', {}, True, None), + ('abcdef', 'xyz', {}, False, 'Test Item 1 (test field): abcdef does not start with xyz.') +]) +def test_startsWith(val, substr, kwargs, exp_result, exp_message): + """Test startsWith validator error messages.""" + _validator = category2.startsWith(substr, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, substr, kwargs, exp_result, exp_message', [ + ('abc123', 'c1', {}, True, None), + ('abc123', 'xy', {}, False, 'Test Item 1 (test field): abc123 does not contain xy.'), +]) +def test_contains(val, substr, kwargs, exp_result, exp_message): + """Test contains validator error messages.""" + _validator = category2.contains(substr, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + (1001, {}, True, None), + ('ABC', {}, False, 'Test Item 1 (test field): ABC is not a number.'), +]) +def test_isNumber(val, kwargs, exp_result, exp_message): + """Test isNumber validator error messages.""" + _validator = category2.isNumber(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + ('F*&k', {}, False, 'Test Item 1 (test field): F*&k is not alphanumeric.'), + ('Fork', {}, True, None), +]) +def test_isAlphaNumeric(val, kwargs, exp_result, exp_message): + """Test isAlphaNumeric validator error messages.""" + _validator = category2.isAlphaNumeric(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, start, end, kwargs, exp_result, exp_message', [ + (' ', 0, 4, {}, True, None), + ('1001', 0, 4, {}, False, 'Test Item 1 (test field): 1001 is not blank between positions 0 and 4.'), +]) +def test_isEmpty(val, start, end, kwargs, exp_result, exp_message): + """Test isEmpty 
validator error messages.""" + _validator = category2.isEmpty(start, end, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, start, end, kwargs, exp_result, exp_message', [ + ('1001', 0, 4, {}, True, None), + (' ', 0, 4, {}, False, 'Test Item 1 (test field): contains blanks between positions 0 and 4.'), +]) +def test_isNotEmpty(val, start, end, kwargs, exp_result, exp_message): + """Test isNotEmpty validator error messages.""" + _validator = category2.isNotEmpty(start, end, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + (' ', {}, True, None), + ('0000', {}, False, 'Test Item 1 (test field): 0000 is not blank.'), +]) +def test_isBlank(val, kwargs, exp_result, exp_message): + """Test isBlank validator error messages.""" + _validator = category2.isBlank(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, length, kwargs, exp_result, exp_message', [ + ('123', 3, {}, True, None), + ('123', 4, {}, False, 'Test Item 1 (test field): field length is 3 characters but must be 4.'), +]) +def test_hasLength(val, length, kwargs, exp_result, exp_message): + """Test hasLength validator error messages.""" + _validator = category2.hasLength(length, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, length, inclusive, kwargs, exp_result, exp_message', [ + ('123', 3, True, {}, True, None), + ('123', 3, False, {}, False, 'Test Item 1 (test field): Value length 3 is not greater than 3.'), +]) +def test_hasLengthGreaterThan(val, length, inclusive, kwargs, exp_result, exp_message): + """Test hasLengthGreaterThan validator error messages.""" + _validator = category2.hasLengthGreaterThan(length, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, 
length, kwargs, exp_result, exp_message', [ + (101, 3, {}, True, None), + (101, 2, {}, False, 'Test Item 1 (test field): 101 does not have exactly 2 digits.'), +]) +def test_intHasLength(val, length, kwargs, exp_result, exp_message): + """Test intHasLength validator error messages.""" + _validator = category2.intHasLength(length, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, number_of_zeros, kwargs, exp_result, exp_message', [ + ('111', 3, {}, True, None), + ('000', 3, {}, False, 'Test Item 1 (test field): 000 is zero.'), +]) +def test_isNotZero(val, number_of_zeros, kwargs, exp_result, exp_message): + """Test isNotZero validator error messages.""" + _validator = category2.isNotZero(number_of_zeros, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, year, kwargs, exp_result, exp_message', [ + ('202201', 2020, {}, True, None), + ('201001', 2020, {}, False, 'Test Item 1 (test field): Year 2010 must be larger than 2020.'), + ('202001', 2020, {}, False, 'Test Item 1 (test field): Year 2020 must be larger than 2020.'), +]) +def test_dateYearIsLargerThan(val, year, kwargs, exp_result, exp_message): + """Test dateYearIsLargerThan validator error messages.""" + _validator = category2.dateYearIsLargerThan(year, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + ('202010', {}, True, None), + ('202001', {}, True, None), + ('202012', {}, True, None), + ('202015', {}, False, 'Test Item 1 (test field): 15 is not a valid month.'), +]) +def test_dateMonthIsValid(val, kwargs, exp_result, exp_message): + """Test dateMonthIsValid validator error messages.""" + _validator = category2.dateMonthIsValid(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + ('20201001', {}, 
True, None), + ('20201031', {}, True, None), + ('20201032', {}, False, 'Test Item 1 (test field): 32 is not a valid day.'), + ('20201050', {}, False, 'Test Item 1 (test field): 50 is not a valid day.'), +]) +def test_dateDayIsValid(val, kwargs, exp_result, exp_message): + """Test dateDayIsValid validator error messages.""" + _validator = category2.dateDayIsValid(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + ('20201', {}, True, None), + ('20204', {}, True, None), + ('20200', {}, False, 'Test Item 1 (test field): 0 is not a valid quarter.'), + ('20205', {}, False, 'Test Item 1 (test field): 5 is not a valid quarter.'), + ('20207', {}, False, 'Test Item 1 (test field): 7 is not a valid quarter.'), + +]) +def test_quarterIsValid(val, kwargs, exp_result, exp_message): + """Test quarterIsValid validator error messages.""" + _validator = category2.quarterIsValid(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) diff --git a/tdrs-backend/tdpservice/parsers/validators/test/test_category3.py b/tdrs-backend/tdpservice/parsers/validators/test/test_category3.py new file mode 100644 index 000000000..40090e8e6 --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/test/test_category3.py @@ -0,0 +1,598 @@ +"""Test category3 validators.""" + + +import pytest +import datetime +from .. 
import category3 +from ..util import ValidationErrorArgs +from ...row_schema import RowSchema +from ...fields import Field + +# export all error messages to file + +test_schema = RowSchema( + record_type="Test", + document=None, + preparsing_validators=[], + postparsing_validators=[], + fields=[], +) + + +def _make_eargs(val): + return ValidationErrorArgs( + value=val, + row_schema=test_schema, + friendly_name='test field', + item_num='1' + ) + + +def _validate_and_assert(validator, val, exp_result, exp_message): + result, msg = validator(val, _make_eargs(val)) + print(f'result: {result}; msg: {msg}') + assert result == exp_result + assert msg == exp_message + + +@pytest.mark.parametrize('val, option, kwargs, exp_result, exp_message', [ + (10, 10, {}, True, None), + (1, 10, {}, False, 'must match 10'), +]) +def test_isEqual(val, option, kwargs, exp_result, exp_message): + """Test isEqual validator error messages.""" + _validator = category3.isEqual(option, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, option, kwargs, exp_result, exp_message', [ + (1, 10, {}, True, None), + (10, 10, {}, False, 'must not be equal to 10'), +]) +def test_isNotEqual(val, option, kwargs, exp_result, exp_message): + """Test isNotEqual validator error messages.""" + _validator = category3.isNotEqual(option, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, options, kwargs, exp_result, exp_message', [ + (1, [1, 2, 3], {}, True, None), + (1, [4, 5, 6], {}, False, 'must be one of [4, 5, 6]'), +]) +def test_isOneOf(val, options, kwargs, exp_result, exp_message): + """Test isOneOf validator error messages.""" + _validator = category3.isOneOf(options, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, options, kwargs, exp_result, exp_message', [ + (1, [4, 5, 6], {}, True, None), + (1, [1, 2, 3], {}, False, 'must 
not be one of [1, 2, 3]'), +]) +def test_isNotOneOf(val, options, kwargs, exp_result, exp_message): + """Test isNotOneOf validator error messages.""" + _validator = category3.isNotOneOf(options, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, option, inclusive, kwargs, exp_result, exp_message', [ + (10, 5, True, {}, True, None), + (10, 20, True, {}, False, 'must be greater than 20'), + (10, 10, False, {}, False, 'must be greater than 10'), +]) +def test_isGreaterThan(val, option, inclusive, kwargs, exp_result, exp_message): + """Test isGreaterThan validator error messages.""" + _validator = category3.isGreaterThan(option, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, option, inclusive, kwargs, exp_result, exp_message', [ + (5, 10, True, {}, True, None), + (5, 3, True, {}, False, 'must be less than 3'), + (5, 5, False, {}, False, 'must be less than 5'), +]) +def test_isLessThan(val, option, inclusive, kwargs, exp_result, exp_message): + """Test isLessThan validator error messages.""" + _validator = category3.isLessThan(option, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, min, max, inclusive, kwargs, exp_result, exp_message', [ + (5, 1, 10, True, {}, True, None), + (20, 1, 10, True, {}, False, 'must be between 1 and 10'), + (5, 1, 10, False, {}, True, None), + (20, 1, 10, False, {}, False, 'must be between 1 and 10'), +]) +def test_isBetween(val, min, max, inclusive, kwargs, exp_result, exp_message): + """Test isBetween validator error messages.""" + _validator = category3.isBetween(min, max, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, substr, kwargs, exp_result, exp_message', [ + ('abcdef', 'abc', {}, True, None), + ('abcdef', 'xyz', {}, False, 'must start with xyz') +]) +def 
test_startsWith(val, substr, kwargs, exp_result, exp_message): + """Test startsWith validator error messages.""" + _validator = category3.startsWith(substr, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, substr, kwargs, exp_result, exp_message', [ + ('abc123', 'c1', {}, True, None), + ('abc123', 'xy', {}, False, 'must contain xy'), +]) +def test_contains(val, substr, kwargs, exp_result, exp_message): + """Test contains validator error messages.""" + _validator = category3.contains(substr, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + (1001, {}, True, None), + ('ABC', {}, False, 'must be a number'), +]) +def test_isNumber(val, kwargs, exp_result, exp_message): + """Test isNumber validator error messages.""" + _validator = category3.isNumber(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + ('F*&k', {}, False, 'must be alphanumeric'), + ('Fork', {}, True, None), +]) +def test_isAlphaNumeric(val, kwargs, exp_result, exp_message): + """Test isAlphaNumeric validator error messages.""" + _validator = category3.isAlphaNumeric(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, start, end, kwargs, exp_result, exp_message', [ + (' ', 0, 4, {}, True, None), + ('1001', 0, 4, {}, False, 'must be empty'), +]) +def test_isEmpty(val, start, end, kwargs, exp_result, exp_message): + """Test isEmpty validator error messages.""" + _validator = category3.isEmpty(start, end, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, start, end, kwargs, exp_result, exp_message', [ + ('1001', 0, 4, {}, True, None), + (' ', 0, 4, {}, False, 'must not be empty'), +]) +def test_isNotEmpty(val, start, end, kwargs, 
exp_result, exp_message): + """Test isNotEmpty validator error messages.""" + _validator = category3.isNotEmpty(start, end, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [ + (' ', {}, True, None), + ('0000', {}, False, 'must be blank'), +]) +def test_isBlank(val, kwargs, exp_result, exp_message): + """Test isBlank validator error messages.""" + _validator = category3.isBlank(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, length, kwargs, exp_result, exp_message', [ + ('123', 3, {}, True, None), + ('123', 4, {}, False, 'must have length 4'), +]) +def test_hasLength(val, length, kwargs, exp_result, exp_message): + """Test hasLength validator error messages.""" + _validator = category3.hasLength(length, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, length, inclusive, kwargs, exp_result, exp_message', [ + ('123', 3, True, {}, True, None), + ('123', 3, False, {}, False, 'must have length greater than 3'), +]) +def test_hasLengthGreaterThan(val, length, inclusive, kwargs, exp_result, exp_message): + """Test hasLengthGreaterThan validator error messages.""" + _validator = category3.hasLengthGreaterThan(length, inclusive, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, length, kwargs, exp_result, exp_message', [ + (101, 3, {}, True, None), + (101, 2, {}, False, 'must have length 2'), +]) +def test_intHasLength(val, length, kwargs, exp_result, exp_message): + """Test intHasLength validator error messages.""" + _validator = category3.intHasLength(length, **kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('val, number_of_zeros, kwargs, exp_result, exp_message', [ + ('111', 3, {}, True, None), + ('000', 3, {}, False, 'must not be zero'), 
+])
+def test_isNotZero(val, number_of_zeros, kwargs, exp_result, exp_message):
+    """Test isNotZero validator error messages."""
+    _validator = category3.isNotZero(number_of_zeros, **kwargs)
+    _validate_and_assert(_validator, val, exp_result, exp_message)
+
+
+@pytest.mark.parametrize('val, min_age, kwargs, exp_result, exp_message', [
+    ('199510', 18, {}, True, None),
+    (
+        f'{datetime.date.today().year - 18}01', 18, {}, False,
+        f'{datetime.date.today().year - 18} must be less than or equal to '
+        f'{datetime.date.today().year - 18} to meet the minimum age requirement.'
+    ),
+    (
+        '202010', 18, {}, False,
+        f'2020 must be less than or equal to {datetime.date.today().year - 18} '
+        'to meet the minimum age requirement.'
+    ),
+])
+def test_isOlderThan(val, min_age, kwargs, exp_result, exp_message):
+    """Test isOlderThan validator error messages."""
+    _validator = category3.isOlderThan(min_age, **kwargs)
+    _validate_and_assert(_validator, val, exp_result, exp_message)
+
+
+@pytest.mark.parametrize('val, kwargs, exp_result, exp_message', [
+    ('123456789', {}, True, None),
+    ('987654321', {}, True, None),
+    (
+        '111111111', {}, False,
+        "is in ['000000000', '111111111', '222222222', '333333333', "
+        "'444444444', '555555555', '666666666', '777777777', '888888888', '999999999']."
+    ),
+    (
+        '999999999', {}, False,
+        "is in ['000000000', '111111111', '222222222', '333333333', "
+        "'444444444', '555555555', '666666666', '777777777', '888888888', '999999999']."
+    ),
+    (
+        '888888888', {}, False,
+        "is in ['000000000', '111111111', '222222222', '333333333', "
+        "'444444444', '555555555', '666666666', '777777777', '888888888', '999999999']."
+ ), +]) +def test_validateSSN(val, kwargs, exp_result, exp_message): + """Test validateSSN validator error messages.""" + _validator = category3.validateSSN(**kwargs) + _validate_and_assert(_validator, val, exp_result, exp_message) + + +@pytest.mark.parametrize('condition_val, result_val, exp_result, exp_message, exp_fields', [ + (1, 1, True, None, ['TestField3', 'TestField1']), # condition fails, valid + (10, 1, True, None, ['TestField1', 'TestField3']), # condition pass, result pass + # condition pass, result fail + ( + 10, 20, False, + 'Since Item 1 (test1) is 10, then Item 3 (test3) 20 must be less than 10', + ['TestField1', 'TestField3'] + ), +]) +def test_ifThenAlso(condition_val, result_val, exp_result, exp_message, exp_fields): + """Test ifThenAlso validator error messages.""" + schema = RowSchema( + fields=[ + Field( + item='1', + name='TestField1', + friendly_name='test1', + type='number', + startIndex=0, + endIndex=1 + ), + Field( + item='2', + name='TestField2', + friendly_name='test2', + type='number', + startIndex=1, + endIndex=2 + ), + Field( + item='3', + name='TestField3', + friendly_name='test3', + type='number', + startIndex=2, + endIndex=3 + ) + ] + ) + instance = { + 'TestField1': condition_val, + 'TestField2': 1, + 'TestField3': result_val, + } + _validator = category3.ifThenAlso( + condition_field_name='TestField1', + condition_function=category3.isEqual(10), + result_field_name='TestField3', + result_function=category3.isLessThan(10) + ) + is_valid, error_msg, fields = _validator(instance, schema) + assert is_valid == exp_result + assert error_msg == exp_message + assert fields == exp_fields + + +@pytest.mark.parametrize('condition_val, result_val, exp_result, exp_message, exp_fields', [ + (1, 1, True, None, ['TestField3', 'TestField1']), # condition fails, valid + (10, 1, True, None, ['TestField1', 'TestField3']), # condition pass, result pass + (10, 110, True, None, ['TestField1', 'TestField3']), + # condition pass, result fail + ( + 10, 
20, False, + 'Since Item 1 (test1) is 10, then Item 3 (test3) 20 must be less than 10 or must be greater than 100.', + ['TestField1', 'TestField3'] + ), +]) +def test_ifThenAlso_or(condition_val, result_val, exp_result, exp_message, exp_fields): + """Test ifThenAlso validator error messages.""" + schema = RowSchema( + fields=[ + Field( + item='1', + name='TestField1', + friendly_name='test1', + type='number', + startIndex=0, + endIndex=1 + ), + Field( + item='2', + name='TestField2', + friendly_name='test2', + type='number', + startIndex=1, + endIndex=2 + ), + Field( + item='3', + name='TestField3', + friendly_name='test3', + type='number', + startIndex=2, + endIndex=3 + ) + ] + ) + instance = { + 'TestField1': condition_val, + 'TestField2': 1, + 'TestField3': result_val, + } + _validator = category3.ifThenAlso( + condition_field_name='TestField1', + condition_function=category3.isEqual(10), + result_field_name='TestField3', + result_function=category3.orValidators([ + category3.isLessThan(10), + category3.isGreaterThan(100) + ], if_result=True) + ) + is_valid, error_msg, fields = _validator(instance, schema) + assert is_valid == exp_result + assert error_msg == exp_message + assert fields == exp_fields + + +@pytest.mark.parametrize('val, exp_result, exp_message', [ + (10, True, None), + (3, True, None), + (100, False, 'Item 1 (TestField1) 100 must match 10 or must be less than 5.'), +]) +def test_orValidators(val, exp_result, exp_message): + """Test orValidators error messages.""" + _validator = category3.orValidators([ + category3.isEqual(10), + category3.isLessThan(5) + ]) + + eargs = ValidationErrorArgs( + value=val, + row_schema=RowSchema(), + friendly_name='TestField1', + item_num='1' + ) + + is_valid, error_msg = _validator(val, eargs) + assert is_valid == exp_result + assert error_msg == exp_message + + +def test_sumIsEqual(): + """Test sumIsEqual postparsing validator.""" + schema = RowSchema( + fields=[ + Field( + item='1', + name='TestField1', + 
friendly_name='test1', + type='number', + startIndex=0, + endIndex=1 + ), + Field( + item='2', + name='TestField2', + friendly_name='test2', + type='number', + startIndex=1, + endIndex=2 + ), + Field( + item='3', + name='TestField3', + friendly_name='test3', + type='number', + startIndex=2, + endIndex=3 + ) + ] + ) + instance = { + 'TestField1': 2, + 'TestField2': 1, + 'TestField3': 9, + } + result = category3.sumIsEqual('TestField2', ['TestField1', 'TestField3'])(instance, schema) + assert result == ( + False, + "T1: The sum of ['TestField1', 'TestField3'] does not equal TestField2 test2 Item 2.", + ['TestField2', 'TestField1', 'TestField3'] + ) + instance['TestField2'] = 11 + result = category3.sumIsEqual('TestField2', ['TestField1', 'TestField3'])(instance, schema) + assert result == (True, None, ['TestField2', 'TestField1', 'TestField3']) + + +def test_sumIsLarger(): + """Test sumIsLarger postparsing validator.""" + schema = RowSchema( + fields=[ + Field( + item='1', + name='TestField1', + friendly_name='test1', + type='number', + startIndex=0, + endIndex=1 + ), + Field( + item='2', + name='TestField2', + friendly_name='test2', + type='number', + startIndex=1, + endIndex=2 + ), + Field( + item='3', + name='TestField3', + friendly_name='test3', + type='number', + startIndex=2, + endIndex=3 + ) + ] + ) + instance = { + 'TestField1': 2, + 'TestField2': 1, + 'TestField3': 5, + } + result = category3.sumIsLarger(['TestField1', 'TestField3'], 10)(instance, schema) + assert result == ( + False, + "T1: The sum of ['TestField1', 'TestField3'] is not larger than 10.", + ['TestField1', 'TestField3'] + ) + instance['TestField3'] = 9 + result = category3.sumIsLarger(['TestField1', 'TestField3'], 10)(instance, schema) + assert result == (True, None, ['TestField1', 'TestField3']) + + +def test_validate__FAM_AFF__SSN(): + """Test `validate__FAM_AFF__SSN` gives a valid result.""" + schema = RowSchema( + fields=[ + Field( + item='1', + name='FAMILY_AFFILIATION', + 
friendly_name='family affiliation', + type='number', + startIndex=0, + endIndex=1 + ), + Field( + item='2', + name='CITIZENSHIP_STATUS', + friendly_name='citizenship status', + type='number', + startIndex=1, + endIndex=2 + ), + Field( + item='3', + name='SSN', + friendly_name='social security number', + type='number', + startIndex=2, + endIndex=11 + ) + ] + ) + instance = { + 'FAMILY_AFFILIATION': 2, + 'CITIZENSHIP_STATUS': 1, + 'SSN': '0'*9, + } + result = category3.validate__FAM_AFF__SSN()(instance, schema) + assert result == ( + False, + 'T1: Since Item 1 (family affiliation) is 2 and Item 2 (citizenship status) is 1 or 2, ' + 'then Item 3 (social security number) must not be in 000000000 -- 999999999.', + ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS', 'SSN'] + ) + instance['SSN'] = '1'*8 + '0' + result = category3.validate__FAM_AFF__SSN()(instance, schema) + assert result == (True, None, ['FAMILY_AFFILIATION', 'CITIZENSHIP_STATUS', 'SSN']) + + +def test_validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE(): + """Test `validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE` gives a valid result.""" + schema = RowSchema( + fields=[ + Field( + item='1', + name='WORK_ELIGIBLE_INDICATOR', + friendly_name='work eligible indicator', + type='string', + startIndex=0, + endIndex=1 + ), + Field( + item='2', + name='RELATIONSHIP_HOH', + friendly_name='relationship w/ head of household', + type='string', + startIndex=1, + endIndex=2 + ), + Field( + item='3', + name='DATE_OF_BIRTH', + friendly_name='date of birth', + type='string', + startIndex=2, + endIndex=10 + ), + Field( + item='4', + name='RPT_MONTH_YEAR', + friendly_name='report month/year', + type='string', + startIndex=10, + endIndex=16 + ) + ] + ) + instance = { + 'WORK_ELIGIBLE_INDICATOR': '11', + 'RELATIONSHIP_HOH': '1', + 'DATE_OF_BIRTH': '20200101', + 'RPT_MONTH_YEAR': '202010', + } + result = category3.validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE()(instance, schema) + assert result == ( + False, + 'T1: Since Item 1 (work eligible 
indicator) is 11 and Item 3 (Age) is less than 19, ' + 'then Item 2 (relationship w/ head of household) must not be 1.', + ['WORK_ELIGIBLE_INDICATOR', 'RELATIONSHIP_HOH', 'DATE_OF_BIRTH'] + ) + instance['DATE_OF_BIRTH'] = '19950101' + result = category3.validate__WORK_ELIGIBLE_INDICATOR__HOH__AGE()(instance, schema) + assert result == (True, None, ['WORK_ELIGIBLE_INDICATOR', 'RELATIONSHIP_HOH', 'DATE_OF_BIRTH']) diff --git a/tdrs-backend/tdpservice/parsers/validators/util.py b/tdrs-backend/tdpservice/parsers/validators/util.py new file mode 100644 index 000000000..2ded171ff --- /dev/null +++ b/tdrs-backend/tdpservice/parsers/validators/util.py @@ -0,0 +1,108 @@ +"""Validation helper functions and data classes.""" + + +import functools +import logging +from dataclasses import dataclass +from typing import Any + +logger = logging.getLogger(__name__) + + +def make_validator(validator_func, error_func): + """ + Return a function accepting a value input and returning (bool, string) to represent validation state. + + @param validator_func: a function accepting a val and returning a bool + @param error_func: a function accepting a ValidationErrorArguments obj and returning a string + @return: a function returning (True, None) for success or (False, string) for failure, + with the string representing the error message + """ + def validator(value, eargs): + try: + if validator_func(value): + return (True, None) + except Exception: + logger.exception("Caught exception in validator.") + return (False, error_func(eargs)) + + return validator + + +# decorator helper +# outer function wraps the decorator to handle arguments to the decorator itself +def validator(baseValidator): + """ + Wrap error generation func to create a validator with baseValidator. 
+ + @param baseValidator: a function from parsers.validators.base + @param errorFunc: a function returning an error generator for make_validator + @return: make_validator with the results of baseValidator and errorFunc both evaluated + """ + # inner decorator wraps the given function and returns a function + # that gives us our final make_validator + def _decorator(errorFunc): + @functools.wraps(errorFunc) + def _validator(*args, **kwargs): + validator_func = baseValidator(*args, **kwargs) + error_func = errorFunc(*args, **kwargs) + return make_validator(validator_func, error_func) + return _validator + return _decorator + + +def value_is_empty(value, length, extra_vals={}): + """Handle 'empty' values as field inputs.""" + # TODO: have to build mixed type handling for value + empty_values = { + '', + ' '*length, # ' ' + '#'*length, # '#####' + '_'*length, # '_____' + } + + empty_values = empty_values.union(extra_vals) + + return value is None or value in empty_values + + +def _is_empty(value, start, end): + end = end if end else len(str(value)) + vlen = end - start + subv = str(value)[start:end] + return value_is_empty(subv, vlen) or len(subv) < vlen + + +def _is_all_zeros(value, start, end): + """Check if a value is all zeros.""" + return value[start:end] == "0" * (end - start) + + +def evaluate_all(validators, value, eargs): + """Evaluate all validators in the list and compose the result tuples in an array.""" + return [ + validator(value, eargs) + for validator in validators + ] + + +def is_quiet_preparser_errors(min_length, empty_from=61, empty_to=101): + """Return a function that checks if the length is valid and if the value is empty.""" + def return_value(value): + is_length_valid = len(value) >= min_length + is_empty = value_is_empty( + value[empty_from:empty_to], + len(value[empty_from:empty_to]) + ) + return not (is_length_valid and not is_empty and not _is_all_zeros(value, empty_from, empty_to)) + return return_value + + +@dataclass +class 
ValidationErrorArgs: + """Dataclass for args to `make_validator` `error_func`s.""" + + value: Any + row_schema: object # RowSchema causes circular import + friendly_name: str + item_num: str diff --git a/tdrs-backend/tdpservice/scheduling/management/db_backup.py b/tdrs-backend/tdpservice/scheduling/management/db_backup.py index 2ee42c14a..11beceaed 100644 --- a/tdrs-backend/tdpservice/scheduling/management/db_backup.py +++ b/tdrs-backend/tdpservice/scheduling/management/db_backup.py @@ -57,28 +57,16 @@ def get_system_values(): sys_values['S3_SECRET_ACCESS_KEY'] = sys_values['S3_CREDENTIALS']['secret_access_key'] sys_values['S3_BUCKET'] = sys_values['S3_CREDENTIALS']['bucket'] sys_values['S3_REGION'] = sys_values['S3_CREDENTIALS']['region'] - sys_values['DATABASE_URI'] = OS_ENV['DATABASE_URL'] + # Set AWS credentials in env, Boto3 uses the env variables for connection os.environ["AWS_ACCESS_KEY_ID"] = sys_values['S3_ACCESS_KEY_ID'] os.environ["AWS_SECRET_ACCESS_KEY"] = sys_values['S3_SECRET_ACCESS_KEY'] # Set Database connection info - AWS_RDS_SERVICE_JSON = json.loads(OS_ENV['VCAP_SERVICES'])['aws-rds'][0]['credentials'] - sys_values['DATABASE_PORT'] = AWS_RDS_SERVICE_JSON['port'] - sys_values['DATABASE_PASSWORD'] = AWS_RDS_SERVICE_JSON['password'] - sys_values['DATABASE_DB_NAME'] = AWS_RDS_SERVICE_JSON['db_name'] - sys_values['DATABASE_HOST'] = AWS_RDS_SERVICE_JSON['host'] - sys_values['DATABASE_USERNAME'] = AWS_RDS_SERVICE_JSON['username'] - - # write .pgpass - with open('/home/vcap/.pgpass', 'w') as f: - f.write(sys_values['DATABASE_HOST'] + ":" - + sys_values['DATABASE_PORT'] + ":" - + settings.DATABASES['default']['NAME'] + ":" - + sys_values['DATABASE_USERNAME'] + ":" - + sys_values['DATABASE_PASSWORD']) - os.environ['PGPASSFILE'] = '/home/vcap/.pgpass' - os.system('chmod 0600 /home/vcap/.pgpass') + AWS_RDS_SERVICE_JSON = json.loads(OS_ENV['VCAP_SERVICES'])['aws-rds'][0] + sys_values['DATABASE_URI'] = AWS_RDS_SERVICE_JSON['credentials']['uri'].rsplit('/', 
1)[0] + sys_values['DATABASE_DB_NAME'] = AWS_RDS_SERVICE_JSON['credentials']['db_name'] + return sys_values @@ -94,19 +82,11 @@ def backup_database(file_name, pg_dump -F c --no-acl --no-owner -f backup.pg postgresql://${USERNAME}:${PASSWORD}@${HOST}:${PORT}/${NAME} """ try: - # TODO: This is a bandaid until the correct logic is determined for the system values with respect to the - # correct database name. - # cmd = postgres_client + "pg_dump -Fc --no-acl -f " + file_name + " -d " + database_uri - db_host = settings.DATABASES['default']['HOST'] - db_port = settings.DATABASES['default']['PORT'] - db_name = settings.DATABASES['default']['NAME'] - db_user = settings.DATABASES['default']['USER'] - - export_password = f"export PGPASSWORD={settings.DATABASES['default']['PASSWORD']}" - cmd = (f"{export_password} && {postgres_client}pg_dump -h {db_host} -p {db_port} -d {db_name} -U {db_user} " - f"-F c --no-password --no-acl --no-owner -f {file_name}") + cmd = f"{postgres_client}pg_dump -Fc --no-acl -f {file_name} -d {database_uri}" logger.info(f"Executing backup command: {cmd}") - os.system(cmd) + code = os.system(cmd) + if code != 0: + raise Exception("pg_dump command failed with a non zero exit code.") msg = "Successfully executed backup. 
Wrote pg dumpfile to {}".format(file_name)
         logger.info(msg)
         LogEntry.objects.log_action(
@@ -268,28 +248,47 @@ def get_database_credentials(database_uri):
         database_name = database_uri
     return [username, password, host, port, database_name]
 
-
-def main(argv, sys_values, system_user):
-    """Handle commandline args."""
-    arg_file = "/tmp/backup.pg"
-    arg_database = sys_values['DATABASE_URI']
+def get_opts(argv, db_name):
+    """Parse command line options."""
+    arg_file = f"/tmp/{db_name}_backup.pg"
     arg_to_restore = False
     arg_to_backup = False
+    restore_db_name = None
+    opts, args = getopt.getopt(argv, "hbrf:n:", ["help", "backup", "restore", "file=", "restore_db_name="])
+    for opt, arg in opts:
+        if "backup" in opt or "-b" in opt:
+            arg_to_backup = True
+        elif "restore" in opt or "-r" in opt:
+            arg_to_restore = True
+        if ("file" in opt or "-f" in opt) and arg:
+            arg_file = arg if arg[0] == "/" else "/tmp/" + arg
+        if ("restore_db_name" in opt or "-n" in opt) and arg:
+            restore_db_name = arg
+
+    if arg_to_restore and not restore_db_name:
+        raise ValueError("You must pass a `-n <db_name>` when trying to restore a DB.")
+
+    return arg_file, arg_to_backup, arg_to_restore, restore_db_name
+
+def get_db_name(sys_values):
+    """
+    Get the correct database name.
+
-    try:
-        opts, args = getopt.getopt(argv, "hbrf:d:", ["help", "backup", "restore", "file=", "database=", ])
-        for opt, arg in opts:
-            if "backup" in opt or "-b" in opt:
-                arg_to_backup = True
-            elif "restore" in opt or "-r" in opt:
-                arg_to_restore = True
-            if "file" in opt or "-f" in opt and arg:
-                arg_file = arg if arg[0] == "/" else "/tmp/" + arg
-            if "database" in opt or "-d" in opt:
-                arg_database = arg
+    In prod we use the default database name that AWS creates. In the Dev and Staging environments the databases are
+    named based off of their app; i.e. tdp_db_raft. The deploy script sets the APP_DB_NAME environment variable for all
+    apps except prod.
+ """ + env_db_name = os.getenv("APP_DB_NAME", None) + if env_db_name is None: + return sys_values['DATABASE_DB_NAME'] + return env_db_name - except Exception as e: - raise e +def main(argv, sys_values, system_user): + """Handle commandline args.""" + db_base_uri = sys_values['DATABASE_URI'] + + db_name = get_db_name(sys_values) + arg_file, arg_to_backup, arg_to_restore, restore_db_name = get_opts(argv, db_name) if arg_to_backup: LogEntry.objects.log_action( @@ -303,7 +302,7 @@ def main(argv, sys_values, system_user): # back up database backup_database(file_name=arg_file, postgres_client=sys_values['POSTGRES_CLIENT_DIR'], - database_uri=arg_database, + database_uri=f"{db_base_uri}/{db_name}", system_user=system_user) # upload backup file @@ -348,7 +347,7 @@ def main(argv, sys_values, system_user): # restore database restore_database(file_name=arg_file, postgres_client=sys_values['POSTGRES_CLIENT_DIR'], - database_uri=arg_database, + database_uri=f"{db_base_uri}/{restore_db_name}", system_user=system_user) LogEntry.objects.log_action( diff --git a/tdrs-backend/tdpservice/scheduling/parser_task.py b/tdrs-backend/tdpservice/scheduling/parser_task.py index 732d6fbe6..2b1fb3d51 100644 --- a/tdrs-backend/tdpservice/scheduling/parser_task.py +++ b/tdrs-backend/tdpservice/scheduling/parser_task.py @@ -3,12 +3,15 @@ from celery import shared_task import logging from django.contrib.auth.models import Group +from django.db.utils import DatabaseError from tdpservice.users.models import AccountApprovalStatusChoices, User from tdpservice.data_files.models import DataFile from tdpservice.parsers.parse import parse_datafile -from tdpservice.parsers.models import DataFileSummary +from tdpservice.parsers.models import DataFileSummary, ParserErrorCategoryChoices from tdpservice.parsers.aggregates import case_aggregates_by_month, total_errors_by_month +from tdpservice.parsers.util import log_parser_exception, make_generate_parser_error from tdpservice.email.helpers.data_file import 
send_data_submitted_email +from tdpservice.search_indexes.models.reparse_meta import ReparseMeta logger = logging.getLogger(__name__) @@ -20,28 +23,53 @@ def parse(data_file_id, should_send_submission_email=True): # passing the data file FileField across redis was rendering non-serializable failures, doing the below lookup # to avoid those. I suppose good practice to not store/serializer large file contents in memory when stored in redis # for undetermined amount of time. - data_file = DataFile.objects.get(id=data_file_id) + try: + data_file = DataFile.objects.get(id=data_file_id) + logger.info(f"DataFile parsing started for file {data_file.filename}") - logger.info(f"DataFile parsing started for file {data_file.filename}") + dfs = DataFileSummary.objects.create(datafile=data_file, status=DataFileSummary.Status.PENDING) + errors = parse_datafile(data_file, dfs) + dfs.status = dfs.get_status() - dfs = DataFileSummary.objects.create(datafile=data_file, status=DataFileSummary.Status.PENDING) - errors = parse_datafile(data_file, dfs) - dfs.status = dfs.get_status() + if "Case Data" in data_file.section: + dfs.case_aggregates = case_aggregates_by_month(data_file, dfs.status) + else: + dfs.case_aggregates = total_errors_by_month(data_file, dfs.status) - if "Case Data" in data_file.section: - dfs.case_aggregates = case_aggregates_by_month(data_file, dfs.status) - else: - dfs.case_aggregates = total_errors_by_month(data_file, dfs.status) + dfs.save() - dfs.save() + logger.info(f"Parsing finished for file -> {repr(data_file)} with status " + f"{dfs.status} and {len(errors)} errors.") - logger.info(f"Parsing finished for file -> {repr(data_file)} with status {dfs.status} and {len(errors)} errors.") + if should_send_submission_email is True: + recipients = User.objects.filter( + stt=data_file.stt, + account_approval_status=AccountApprovalStatusChoices.APPROVED, + groups=Group.objects.get(name='Data Analyst') + ).values_list('username', flat=True).distinct() - if 
should_send_submission_email is True: - recipients = User.objects.filter( - stt=data_file.stt, - account_approval_status=AccountApprovalStatusChoices.APPROVED, - groups=Group.objects.get(name='Data Analyst') - ).values_list('username', flat=True).distinct() - - send_data_submitted_email(dfs, recipients) + send_data_submitted_email(dfs, recipients) + except DatabaseError as e: + log_parser_exception(data_file, + f"Encountered Database exception in parser_task.py: \n{e}", + "error" + ) + ReparseMeta.increment_files_failed(data_file.reparse_meta_models) + except Exception as e: + generate_error = make_generate_parser_error(data_file, None) + error = generate_error(schema=None, + error_category=ParserErrorCategoryChoices.PRE_CHECK, + error_message=("We're sorry, an unexpected error has occurred and the file has been " + "rejected. Please contact the TDP support team at TANFData@acf.hhs.gov " + "for further assistance."), + record=None, + field=None + ) + error.save() + dfs.set_status(DataFileSummary.Status.REJECTED) + dfs.save() + log_parser_exception(data_file, + (f"Uncaught exception while parsing datafile: {data_file.pk}! Please review the logs to " + f"see if manual intervention is required. Exception: \n{e}"), + "critical") + ReparseMeta.increment_files_failed(data_file.reparse_meta_models) diff --git a/tdrs-backend/tdpservice/search_indexes/admin/__init__.py b/tdrs-backend/tdpservice/search_indexes/admin/__init__.py index 91469dfa5..b8d2e6626 100644 --- a/tdrs-backend/tdpservice/search_indexes/admin/__init__.py +++ b/tdrs-backend/tdpservice/search_indexes/admin/__init__.py @@ -1,6 +1,6 @@ from django.contrib import admin from .. import models -from . import tanf, tribal, ssp +from . 
import tanf, tribal, ssp, reparse_meta admin.site.register(models.tanf.TANF_T1, tanf.TANF_T1Admin) admin.site.register(models.tanf.TANF_T2, tanf.TANF_T2Admin) @@ -25,3 +25,5 @@ admin.site.register(models.ssp.SSP_M5, ssp.SSP_M5Admin) admin.site.register(models.ssp.SSP_M6, ssp.SSP_M6Admin) admin.site.register(models.ssp.SSP_M7, ssp.SSP_M7Admin) + +admin.site.register(models.reparse_meta.ReparseMeta, reparse_meta.ReparseMetaAdmin) diff --git a/tdrs-backend/tdpservice/search_indexes/admin/filters.py b/tdrs-backend/tdpservice/search_indexes/admin/filters.py index 1d8caf0f8..399e45e36 100644 --- a/tdrs-backend/tdpservice/search_indexes/admin/filters.py +++ b/tdrs-backend/tdpservice/search_indexes/admin/filters.py @@ -2,8 +2,8 @@ from django.utils.translation import ugettext_lazy as _ from django.contrib.admin import SimpleListFilter from django.db.models import Q as Query -from more_admin_filters import MultiSelectDropdownFilter from tdpservice.stts.models import STT +from tdpservice.search_indexes.admin.multiselect_filter import MultiSelectDropdownFilter import datetime @@ -49,6 +49,7 @@ class STTFilter(MultiSelectDropdownFilter): def __init__(self, field, request, params, model, model_admin, field_path): super(MultiSelectDropdownFilter, self).__init__(field, request, params, model, model_admin, field_path) self.lookup_choices = self._get_lookup_choices(request) + self.title = _("STT") def _get_lookup_choices(self, request): """Filter queryset to guarantee lookup_choices only has STTs associated with the record type.""" diff --git a/tdrs-backend/tdpservice/search_indexes/admin/multiselect_filter.py b/tdrs-backend/tdpservice/search_indexes/admin/multiselect_filter.py new file mode 100644 index 000000000..071ff985b --- /dev/null +++ b/tdrs-backend/tdpservice/search_indexes/admin/multiselect_filter.py @@ -0,0 +1,191 @@ +"""File containing multiselect filter classes and mixins.""" +import urllib.parse +from django.contrib import admin +from django.db.models import Q +from 
django.utils.translation import gettext_lazy as _ +from django.contrib.admin.utils import reverse_field_path +from django.core.exceptions import ValidationError +from django.contrib.admin.options import IncorrectLookupParameters + + +def flatten_used_parameters(used_parameters: dict, keep_list: bool = True): + """Flatten length 1 lists in dictionary.""" + # FieldListFilter.__init__ calls prepare_lookup_value, + # which returns a list if lookup_kwarg ends with "__in" + for k, v in used_parameters.items(): + if len(v) == 1 and (isinstance(v[0], list) or not keep_list): + used_parameters[k] = v[0] + +class MultiSelectMixin(object): + """Mixin for multi-select filters.""" + + def queryset(self, request, queryset): + """Build queryset based on choices.""" + params = Q() + for lookup_arg, value in self.used_parameters.items(): + params |= Q(**{lookup_arg: value}) + try: + return queryset.filter(params) + except (ValueError, ValidationError) as e: + # Fields may raise a ValueError or ValidationError when converting + # the parameters to the correct type. 
+ raise IncorrectLookupParameters(e) + + def querystring_for_choices(self, val, changelist): + """Build query string based on new val.""" + lookup_vals = self.lookup_vals[:] + if val in self.lookup_vals: + lookup_vals.remove(val) + else: + lookup_vals.append(val) + if lookup_vals: + query_string = changelist.get_query_string({ + self.lookup_kwarg: ','.join(lookup_vals), + }, []) + else: + query_string = changelist.get_query_string({}, [self.lookup_kwarg]) + return query_string + + def querystring_for_isnull(self, changelist): + """Build query string based on a null val.""" + if self.lookup_val_isnull: + query_string = changelist.get_query_string({}, [self.lookup_kwarg_isnull]) + else: + query_string = changelist.get_query_string({ + self.lookup_kwarg_isnull: 'True', + }, []) + return query_string + + def has_output(self): + """Return if there is output.""" + return len(self.lookup_choices) > 1 + + def get_facet_counts(self, pk_attname, filtered_qs): + """Return count of __in facets.""" + if not self.lookup_kwarg.endswith("__in"): + raise NotImplementedError("Facets are only supported for default lookup_kwarg values, ending with '__in' " + "(got '%s')" % self.lookup_kwarg) + + orig_lookup_kwarg = self.lookup_kwarg + self.lookup_kwarg = self.lookup_kwarg.removesuffix("in") + "exact" + counts = super().get_facet_counts(pk_attname, filtered_qs) + self.lookup_kwarg = orig_lookup_kwarg + return counts + + +class MultiSelectFilter(MultiSelectMixin, admin.AllValuesFieldListFilter): + """Multi select filter for all kind of fields.""" + + def __init__(self, field, request, params, model, model_admin, field_path): + self.lookup_kwarg = '%s__in' % field_path + self.lookup_kwarg_isnull = '%s__isnull' % field_path + lookup_vals = request.GET.get(self.lookup_kwarg) + self.lookup_vals = lookup_vals.split(',') if lookup_vals else list() + self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull) + self.empty_value_display = model_admin.get_empty_value_display() + 
parent_model, reverse_path = reverse_field_path(model, field_path) + # Obey parent ModelAdmin queryset when deciding which options to show + if model == parent_model: + queryset = model_admin.get_queryset(request) + else: + queryset = parent_model._default_manager.all() + self.lookup_choices = (queryset + .distinct() + .order_by(field.name) + .values_list(field.name, flat=True)) + super(admin.AllValuesFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path) + flatten_used_parameters(self.used_parameters) + self.used_parameters = self.prepare_used_parameters(self.used_parameters) + + def prepare_querystring_value(self, value): + """Preparse the query string value.""" + # mask all commas or these values will be used + # in a comma-seperated-list as get-parameter + return str(value).replace(',', '%~') + + def prepare_used_parameters(self, used_parameters): + """Prepare parameters.""" + # remove comma-mask from list-values for __in-lookups + for key, value in used_parameters.items(): + if not key.endswith('__in'): + continue + used_parameters[key] = [v.replace('%~', ',') for v in value] + return used_parameters + + def choices(self, changelist): + """Generate choices.""" + add_facets = getattr(changelist, "add_facets", False) + facet_counts = self.get_facet_queryset(changelist) if add_facets else None + yield { + 'selected': not self.lookup_vals and self.lookup_val_isnull is None, + 'query_string': changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull]), + 'display': _('All'), + } + include_none = False + count = None + empty_title = self.empty_value_display + for i, val in enumerate(self.lookup_choices): + if add_facets: + count = facet_counts[f"{i}__c"] + if val is None: + include_none = True + empty_title = f"{empty_title} ({count})" if add_facets else empty_title + continue + val = str(val) + qval = self.prepare_querystring_value(val) + yield { + 'selected': qval in self.lookup_vals, + 'query_string': 
self.querystring_for_choices(qval, changelist), + "display": f"{val} ({count})" if add_facets else val, + } + if include_none: + yield { + 'selected': bool(self.lookup_val_isnull), + 'query_string': self.querystring_for_isnull(changelist), + 'display': empty_title, + } + + +class MultiSelectDropdownFilter(MultiSelectFilter): + """Multi select dropdown filter for all kind of fields.""" + + template = 'multiselectdropdownfilter.html' + + def choices(self, changelist): + """Generate choices.""" + add_facets = getattr(changelist, "add_facets", False) + facet_counts = self.get_facet_queryset(changelist) if add_facets else None + query_string = changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull]) + yield { + 'selected': not self.lookup_vals and self.lookup_val_isnull is None, + 'query_string': query_string, + 'display': _('All'), + } + include_none = False + count = None + empty_title = self.empty_value_display + for i, val in enumerate(self.lookup_choices): + if add_facets: + count = facet_counts[f"{i}__c"] + if val is None: + include_none = True + empty_title = f"{empty_title} ({count})" if add_facets else empty_title + continue + + val = str(val) + qval = self.prepare_querystring_value(val) + yield { + 'selected': qval in self.lookup_vals, + 'query_string': query_string, + "display": f"{val} ({count})" if add_facets else val, + 'value': urllib.parse.quote_plus(val), + 'key': self.lookup_kwarg, + } + if include_none: + yield { + 'selected': bool(self.lookup_val_isnull), + 'query_string': query_string, + "display": empty_title, + 'value': 'True', + 'key': self.lookup_kwarg_isnull, + } diff --git a/tdrs-backend/tdpservice/search_indexes/admin/reparse_meta.py b/tdrs-backend/tdpservice/search_indexes/admin/reparse_meta.py new file mode 100644 index 000000000..f030501f8 --- /dev/null +++ b/tdrs-backend/tdpservice/search_indexes/admin/reparse_meta.py @@ -0,0 +1,25 @@ +"""ModelAdmin classes for parsed SSP data files.""" +from .mixins import 
ReadOnlyAdminMixin +from tdpservice.data_files.admin.admin import DataFileInline + + +class ReparseMetaAdmin(ReadOnlyAdminMixin): + """ModelAdmin class for parsed M1 data files.""" + + inlines = [DataFileInline] + + list_display = [ + 'id', + 'created_at', + 'timeout_at', + 'success', + 'finished', + 'db_backup_location', + ] + + list_filter = [ + 'success', + 'finished', + 'fiscal_year', + 'fiscal_quarter', + ] diff --git a/tdrs-backend/tdpservice/search_indexes/management/commands/clean_and_reparse.py b/tdrs-backend/tdpservice/search_indexes/management/commands/clean_and_reparse.py index f6cf2c930..a3b746a66 100644 --- a/tdrs-backend/tdpservice/search_indexes/management/commands/clean_and_reparse.py +++ b/tdrs-backend/tdpservice/search_indexes/management/commands/clean_and_reparse.py @@ -1,17 +1,20 @@ -"""Delete and re-parse a set of datafiles.""" +"""Delete and reparse a set of datafiles.""" from django.core.management.base import BaseCommand from django.core.management import call_command from django.db.utils import DatabaseError from elasticsearch.exceptions import ElasticsearchException from tdpservice.data_files.models import DataFile -from tdpservice.parsers.models import ParserError +from tdpservice.parsers.models import DataFileSummary, ParserError from tdpservice.scheduling import parser_task -from tdpservice.search_indexes.documents import tanf, ssp, tribal +from tdpservice.search_indexes.util import DOCUMENTS, count_all_records +from tdpservice.search_indexes.models.reparse_meta import ReparseMeta from tdpservice.core.utils import log from django.contrib.admin.models import ADDITION from tdpservice.users.models import User -from datetime import datetime +from datetime import timedelta +from django.utils import timezone +from django.conf import settings import logging logger = logging.getLogger(__name__) @@ -20,73 +23,83 @@ class Command(BaseCommand): """Command class.""" - help = "Delete and re-parse a set of datafiles. 
All re-parsed data will be moved into a new set of Elastic indexes." + help = "Delete and reparse a set of datafiles.." def add_arguments(self, parser): """Add arguments to the management command.""" parser.add_argument("-q", "--fiscal_quarter", type=str, choices=["Q1", "Q2", "Q3", "Q4"], - help="Re-parse all files in the fiscal quarter, e.g. Q1.") - parser.add_argument("-y", "--fiscal_year", type=int, help="Re-parse all files in the fiscal year, e.g. 2021.") - parser.add_argument("-a", "--all", action='store_true', help="Clean and re-parse all datafiles. If selected, " + help="Reparse all files in the fiscal quarter, e.g. Q1.") + parser.add_argument("-y", "--fiscal_year", type=int, help="Reparse all files in the fiscal year, e.g. 2021.") + parser.add_argument("-a", "--all", action='store_true', help="Clean and reparse all datafiles. If selected, " "fiscal_year/quarter aren't necessary.") - parser.add_argument("-n", "--new_indices", action='store_true', help="Move re-parsed data to new Elastic " - "indices.") - parser.add_argument("-d", "--delete_indices", action='store_true', help="Requires new_indices. Delete the " - "current Elastic indices.") def __get_log_context(self, system_user): """Return logger context.""" context = {'user_id': system_user.id, 'action_flag': ADDITION, - 'object_repr': "Clean and Re-parse" + 'object_repr': "Clean and Reparse" } return context def __backup(self, backup_file_name, log_context): """Execute Postgres DB backup.""" try: - logger.info("Beginning re-parse DB Backup.") + logger.info("Beginning reparse DB Backup.") call_command('backup_db', '-b', '-f', f'{backup_file_name}') - logger.info("Backup complete! Commencing clean and re-parse.") + logger.info("Backup complete! Commencing clean and reparse.") log("Database backup complete.", logger_context=log_context, level='info') except Exception as e: - log("Database backup FAILED. Clean and re-parse NOT executed. Database and Elastic are CONSISTENT!", + log("Database backup FAILED. 
Clean and reparse NOT executed. Database and Elastic are CONSISTENT!", logger_context=log_context, level='error') raise e - def __handle_elastic(self, new_indices, delete_indices, log_context): + def __handle_elastic(self, new_indices, log_context): """Create new Elastic indices and delete old ones.""" if new_indices: try: - if not delete_indices: - call_command('tdp_search_index', '--create', '-f', '--use-alias', '--use-alias-keep-index') - else: - call_command('tdp_search_index', '--create', '-f', '--use-alias') + call_command('tdp_search_index', '--create', '-f', '--use-alias') log("Index creation complete.", logger_context=log_context, level='info') except ElasticsearchException as e: - log("Elastic index creation FAILED. Clean and re-parse NOT executed. " + log("Elastic index creation FAILED. Clean and reparse NOT executed. " "Database is CONSISTENT, Elastic is INCONSISTENT!", logger_context=log_context, level='error') raise e except Exception as e: - log("Caught generic exception in __handle_elastic. Clean and re-parse NOT executed. " + log("Caught generic exception in __handle_elastic. Clean and reparse NOT executed. " "Database is CONSISTENT, Elastic is INCONSISTENT!", logger_context=log_context, level='error') raise e - def __delete_records(self, docs, file_ids, new_indices, log_context): + def __delete_summaries(self, file_ids, log_context): + """Raw delete all DataFileSummary objects.""" + try: + qset = DataFileSummary.objects.filter(datafile_id__in=file_ids) + qset._raw_delete(qset.db) + except DatabaseError as e: + log('Encountered a DatabaseError while deleting DataFileSummary from Postgres. The database ' + 'and Elastic are INCONSISTENT! Restore the DB from the backup as soon as possible!', + logger_context=log_context, + level='critical') + raise e + except Exception as e: + log('Caught generic exception while deleting DataFileSummary. The database and Elastic are INCONSISTENT! 
' + 'Restore the DB from the backup as soon as possible!', + logger_context=log_context, + level='critical') + raise e + + def __delete_records(self, file_ids, new_indices, log_context): """Delete records, errors, and documents from Postgres and Elastic.""" total_deleted = 0 - self.__delete_errors(file_ids, log_context) - for doc in docs: + for doc in DOCUMENTS: try: model = doc.Django.model qset = model.objects.filter(datafile_id__in=file_ids) @@ -133,15 +146,19 @@ def __delete_errors(self, file_ids, log_context): level='critical') raise e - def __handle_datafiles(self, files, log_context): - """Delete, re-save, and re-parse selected datafiles.""" + def __delete_associated_models(self, meta_model, file_ids, new_indices, log_context): + """Delete all models associated to the selected datafiles.""" + self.__delete_summaries(file_ids, log_context) + self.__delete_errors(file_ids, log_context) + num_deleted = self.__delete_records(file_ids, new_indices, log_context) + meta_model.num_records_deleted = num_deleted + + def __handle_datafiles(self, files, meta_model, log_context): + """Delete, re-save, and reparse selected datafiles.""" for file in files: try: - logger.info(f"Deleting file with PK: {file.pk}") - file.delete() + file.reparse_meta_models.add(meta_model) file.save() - logger.info(f"New file PK: {file.pk}") - # latest version only? -> possible new ticket parser_task.parse.delay(file.pk, should_send_submission_email=False) except DatabaseError as e: log('Encountered a DatabaseError while re-creating datafiles. The database ' @@ -156,13 +173,62 @@ def __handle_datafiles(self, files, log_context): level='critical') raise e + def __count_total_num_records(self, log_context): + """Count total number of records in the database for meta object.""" + try: + return count_all_records() + except DatabaseError as e: + log('Encountered a DatabaseError while counting records for meta model. The database ' + f'and Elastic are consistent! Cancelling reparse to be safe. 
\n{e}', + logger_context=log_context, + level='error') + exit(1) + except Exception as e: + log('Encountered generic exception while counting records for meta model. ' + f'The database and Elastic are consistent! Cancelling reparse to be safe. \n{e}', + logger_context=log_context, + level='error') + exit(1) + + def __assert_sequential_execution(self, log_context): + """Assert that no other reparse commands are still executing.""" + latest_meta_model = ReparseMeta.get_latest() + now = timezone.now() + is_not_none = latest_meta_model is not None + if (is_not_none and latest_meta_model.timeout_at is None): + log(f"The latest ReparseMeta model's (ID: {latest_meta_model.pk}) timeout_at field is None. " + "Cannot safely execute reparse, please fix manually.", + logger_context=log_context, + level='error') + exit(1) + if (is_not_none and not ReparseMeta.assert_all_files_done(latest_meta_model) and + not now > latest_meta_model.timeout_at): + log('A previous execution of the reparse command is RUNNING. Cannot execute in parallel, exiting.', + logger_context=log_context, + level='warn') + exit(1) + elif (is_not_none and latest_meta_model.timeout_at is not None and now > latest_meta_model.timeout_at and not + ReparseMeta.assert_all_files_done(latest_meta_model)): + log("Previous reparse has exceeded the timeout. Allowing execution of the command.", + logger_context=log_context, + level='warn') + + def __calculate_timeout(self, num_files, num_records): + """Estimate a timeout parameter based on the number of files and the number of records.""" + # Increase by an order of magnitude to have the bases covered. 
+ line_parse_time = settings.MEDIAN_LINE_PARSE_TIME * 10 + time_to_queue_datafile = 10 + time_in_seconds = num_files * time_to_queue_datafile + num_records * line_parse_time + delta = timedelta(seconds=time_in_seconds) + logger.info(f"Setting timeout for the reparse event to be {delta} seconds from meta model creation date.") + return delta + def handle(self, *args, **options): - """Delete and re-parse datafiles matching a query.""" + """Delete and reparse datafiles matching a query.""" fiscal_year = options.get('fiscal_year', None) fiscal_quarter = options.get('fiscal_quarter', None) reparse_all = options.get('all', False) - new_indices = options.get('new_indices', False) - delete_indices = options.get('delete_indices', False) + new_indices = reparse_all is True args_passed = fiscal_year is not None or fiscal_quarter is not None or reparse_all @@ -173,7 +239,7 @@ def handle(self, *args, **options): backup_file_name = "/tmp/reparsing_backup" files = DataFile.objects.all() - continue_msg = "You have selected to re-parse datafiles for FY {fy} and {q}. The re-parsed files " + continue_msg = "You have selected to reparse datafiles for FY {fy} and {q}. 
The reparsed files " if reparse_all: backup_file_name += "_FY_All_Q1-4" continue_msg = continue_msg.format(fy="All", q="Q1-4") @@ -200,11 +266,9 @@ def handle(self, *args, **options): fmt_str = "be" if new_indices else "NOT be" continue_msg += "will {new_index} stored in new indices and the old indices ".format(new_index=fmt_str) - fmt_str = "be" if delete_indices else "NOT be" - continue_msg += "will {old_index} deleted.".format(old_index=fmt_str) - - fmt_str = f"ALL ({files.count()})" if reparse_all else f"({files.count()})" - continue_msg += "\nThese options will delete and re-parse {0} datafiles.".format(fmt_str) + num_files = files.count() + fmt_str = f"ALL ({num_files})" if reparse_all else f"({num_files})" + continue_msg += "\nThese options will delete and reparse {0} datafiles.".format(fmt_str) c = str(input(f'\n{continue_msg}\nContinue [y/n]? ')).lower() if c not in ['y', 'yes']: @@ -218,54 +282,56 @@ def handle(self, *args, **options): all_fy = "All" all_q = "Q1-4" - log(f"Starting clean and re-parse command for FY {fiscal_year if fiscal_year else all_fy} and " + log(f"Starting clean and reparse command for FY {fiscal_year if fiscal_year else all_fy} and " f"{fiscal_quarter if fiscal_quarter else all_q}", logger_context=log_context, level='info') - if files.count() == 0: + if num_files == 0: log(f"No files available for the selected Fiscal Year: {fiscal_year if fiscal_year else all_fy} and " f"Quarter: {fiscal_quarter if fiscal_quarter else all_q}. 
Nothing to do.", logger_context=log_context, level='warn') return + self.__assert_sequential_execution(log_context) + meta_model = ReparseMeta.objects.create(fiscal_quarter=fiscal_quarter, + fiscal_year=fiscal_year, + all=reparse_all, + new_indices=new_indices, + delete_old_indices=new_indices, + num_files_to_reparse=num_files) + # Backup the Postgres DB - pattern = "%Y-%m-%d_%H.%M.%S" - backup_file_name += f"_{datetime.now().strftime(pattern)}.pg" + backup_file_name += f"_rpv{meta_model.pk}.pg" self.__backup(backup_file_name, log_context) + meta_model.db_backup_location = backup_file_name + meta_model.save() + # Create and delete Elastic indices if necessary - self.__handle_elastic(new_indices, delete_indices, log_context) + self.__handle_elastic(new_indices, log_context) # Delete records from Postgres and Elastic if necessary file_ids = files.values_list('id', flat=True).distinct() - docs = [ - tanf.TANF_T1DataSubmissionDocument, tanf.TANF_T2DataSubmissionDocument, - tanf.TANF_T3DataSubmissionDocument, tanf.TANF_T4DataSubmissionDocument, - tanf.TANF_T5DataSubmissionDocument, tanf.TANF_T6DataSubmissionDocument, - tanf.TANF_T7DataSubmissionDocument, - - ssp.SSP_M1DataSubmissionDocument, ssp.SSP_M2DataSubmissionDocument, ssp.SSP_M3DataSubmissionDocument, - ssp.SSP_M4DataSubmissionDocument, ssp.SSP_M5DataSubmissionDocument, ssp.SSP_M6DataSubmissionDocument, - ssp.SSP_M7DataSubmissionDocument, - - tribal.Tribal_TANF_T1DataSubmissionDocument, tribal.Tribal_TANF_T2DataSubmissionDocument, - tribal.Tribal_TANF_T3DataSubmissionDocument, tribal.Tribal_TANF_T4DataSubmissionDocument, - tribal.Tribal_TANF_T5DataSubmissionDocument, tribal.Tribal_TANF_T6DataSubmissionDocument, - tribal.Tribal_TANF_T7DataSubmissionDocument - ] - total_deleted = self.__delete_records(docs, file_ids, new_indices, log_context) - logger.info(f"Deleted a total of {total_deleted} records accross {files.count()} files.") + meta_model.total_num_records_initial = 
self.__count_total_num_records(log_context) + meta_model.save() + + self.__delete_associated_models(meta_model, file_ids, new_indices, log_context) + + meta_model.timeout_at = meta_model.created_at + self.__calculate_timeout(num_files, + meta_model.num_records_deleted) + meta_model.save() + logger.info(f"Deleted a total of {meta_model.num_records_deleted} records accross {num_files} files.") # Delete and re-save datafiles to handle cascading dependencies - logger.info(f'Deleting and re-parsing {files.count()} files') - self.__handle_datafiles(files, log_context) + logger.info(f'Deleting and re-parsing {num_files} files') + self.__handle_datafiles(files, meta_model, log_context) log("Database cleansing complete and all files have been re-scheduling for parsing and validation.", logger_context=log_context, level='info') - log(f"Clean and re-parse command completed. All files for FY {fiscal_year if fiscal_year else all_fy} and " + log(f"Clean and reparse command completed. All files for FY {fiscal_year if fiscal_year else all_fy} and " f"{fiscal_quarter if fiscal_quarter else all_q} have been queued for parsing.", logger_context=log_context, level='info') diff --git a/tdrs-backend/tdpservice/search_indexes/management/commands/tdp_search_index.py b/tdrs-backend/tdpservice/search_indexes/management/commands/tdp_search_index.py index 19f3b7d89..a531ae558 100644 --- a/tdrs-backend/tdpservice/search_indexes/management/commands/tdp_search_index.py +++ b/tdrs-backend/tdpservice/search_indexes/management/commands/tdp_search_index.py @@ -13,6 +13,7 @@ from tdpservice.core.utils import log from django.contrib.admin.models import ADDITION from tdpservice.users.models import User +from tdpservice.search_indexes.models.reparse_meta import ReparseMeta class Command(search_index.Command): @@ -28,11 +29,17 @@ def __get_log_context(self): } return context + def __get_index_suffix(self): + meta_model = ReparseMeta.get_latest() + if meta_model is not None and not meta_model.finished: + 
return f"_rpv{meta_model.pk}" + fmt = "%Y-%m-%d_%H.%M.%S" + return f"_{datetime.now().strftime(fmt)}" + def _create(self, models, aliases, options): log_context = self.__get_log_context() alias_index_pairs = [] - fmt = "%Y-%m-%d_%H.%M.%S" - index_suffix = f"_{datetime.now().strftime(fmt)}" + index_suffix = self.__get_index_suffix() for index in registry.get_indices(models): new_index = index._name + index_suffix diff --git a/tdrs-backend/tdpservice/search_indexes/migrations/0030_reparse_meta_model.py b/tdrs-backend/tdpservice/search_indexes/migrations/0030_reparse_meta_model.py new file mode 100644 index 000000000..3b9828be7 --- /dev/null +++ b/tdrs-backend/tdpservice/search_indexes/migrations/0030_reparse_meta_model.py @@ -0,0 +1,39 @@ +# Generated by Django 3.2.15 on 2024-08-01 20:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('search_indexes', '0029_tanf_tribal_ssp_alter_verbose_names'), + ] + + operations = [ + migrations.CreateModel( + name='ReparseMeta', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('timeout_at', models.DateTimeField(auto_now_add=False, null=True)), + ('finished', models.BooleanField(default=False)), + ('success', models.BooleanField(default=False,help_text="All files completed parsing.")), + ('num_files_to_reparse', models.PositiveIntegerField(default=0)), + ('files_completed', models.PositiveIntegerField(default=0)), + ('files_failed', models.PositiveIntegerField(default=0)), + ('num_records_deleted', models.PositiveIntegerField(default=0)), + ('num_records_created', models.PositiveIntegerField(default=0)), + ('total_num_records_initial', models.PositiveBigIntegerField(default=0)), + ('total_num_records_post', models.PositiveBigIntegerField(default=0)), + ('db_backup_location', models.CharField(max_length=512)), + ('fiscal_quarter', 
models.CharField(max_length=2, null=True)), + ('fiscal_year', models.PositiveIntegerField(null=True)), + ('all', models.BooleanField(default=False)), + ('new_indices', models.BooleanField(default=False)), + ('delete_old_indices', models.BooleanField(default=False)), + ], + options={ + 'verbose_name': 'Reparse Meta Model', + }, + ), + ] diff --git a/tdrs-backend/tdpservice/search_indexes/models/__init__.py b/tdrs-backend/tdpservice/search_indexes/models/__init__.py index 42b15a650..85df15209 100644 --- a/tdrs-backend/tdpservice/search_indexes/models/__init__.py +++ b/tdrs-backend/tdpservice/search_indexes/models/__init__.py @@ -1,5 +1,6 @@ -from . import tanf, tribal, ssp +from . import tanf, tribal, ssp, reparse_meta tanf = tanf tribal = tribal ssp = ssp +reparse_meta = reparse_meta diff --git a/tdrs-backend/tdpservice/search_indexes/models/reparse_meta.py b/tdrs-backend/tdpservice/search_indexes/models/reparse_meta.py new file mode 100644 index 000000000..15f659d64 --- /dev/null +++ b/tdrs-backend/tdpservice/search_indexes/models/reparse_meta.py @@ -0,0 +1,144 @@ +"""Meta data model for tracking reparsed files.""" + +from django.db import models, transaction +from django.db.utils import DatabaseError +from django.db.models import Max +from tdpservice.search_indexes.util import count_all_records +import logging + +logger = logging.getLogger(__name__) + + +class ReparseMeta(models.Model): + """ + Meta data model representing a single execution of `clean_and_reparse`. + + Because this model is intended to be queried in a distributed and parrallel fashion, all queries should rely on + database level locking to ensure race conditions aren't introduced. See `increment_files_reparsed` for an example. 
+ """ + + class Meta: + """Meta class for the model.""" + + verbose_name = "Reparse Meta Model" + + created_at = models.DateTimeField(auto_now_add=True) + timeout_at = models.DateTimeField(auto_now_add=False, null=True) + + finished = models.BooleanField(default=False) + success = models.BooleanField(default=False, help_text="All files completed parsing.") + + num_files_to_reparse = models.PositiveIntegerField(default=0) + files_completed = models.PositiveIntegerField(default=0) + files_failed = models.PositiveIntegerField(default=0) + + num_records_deleted = models.PositiveIntegerField(default=0) + num_records_created = models.PositiveIntegerField(default=0) + + total_num_records_initial = models.PositiveBigIntegerField(default=0) + total_num_records_post = models.PositiveBigIntegerField(default=0) + + db_backup_location = models.CharField(max_length=512) + + # Options used to select the files to reparse + fiscal_quarter = models.CharField(max_length=2, null=True) + fiscal_year = models.PositiveIntegerField(null=True) + all = models.BooleanField(default=False) + new_indices = models.BooleanField(default=False) + delete_old_indices = models.BooleanField(default=False) + + @staticmethod + def assert_all_files_done(meta_model): + """ + Check if all files have been parsed with or without exceptions. + + This function assumes the meta_model has been passed in a distributed/thread safe way. If the database row + containing this model has not been locked the caller will experience race issues. + """ + if (meta_model.finished or meta_model.files_completed == meta_model.num_files_to_reparse or + meta_model.files_completed + meta_model.files_failed == meta_model.num_files_to_reparse or + meta_model.files_failed == meta_model.num_files_to_reparse): + return True + return False + + @staticmethod + def set_reparse_finished(meta_model): + """ + Set status/completion fields to appropriate values. 
+ + This function assumes the meta_model has been passed in a distributed/thread safe way. If the database row + containing this model has not been locked the caller will experience race issues. + """ + meta_model.finished = True + meta_model.success = meta_model.files_completed == meta_model.num_files_to_reparse + meta_model.total_num_records_post = count_all_records() + meta_model.save() + + @staticmethod + def increment_files_completed(reparse_meta_models): + """ + Increment the count of files that have completed parsing for the datafile's current/latest reparse model. + + Because this function can be called in parallel we use `select_for_update` because multiple parse tasks can + referrence the same ReparseMeta object that is being queried below. `select_for_update` provides a DB lock on + the object and forces other transactions on the object to wait until this one completes. + """ + if reparse_meta_models.exists(): + with transaction.atomic(): + try: + meta_model = reparse_meta_models.select_for_update().latest("pk") + meta_model.files_completed += 1 + if ReparseMeta.assert_all_files_done(meta_model): + ReparseMeta.set_reparse_finished(meta_model) + meta_model.save() + except DatabaseError: + logger.exception("Encountered exception while trying to update the `files_reparsed` field on the " + f"ReparseMeta object with ID: {meta_model.pk}.") + + @staticmethod + def increment_files_failed(reparse_meta_models): + """ + Increment the count of files that failed parsing for the datafile's current/latest reparse meta model. + + Because this function can be called in parallel we use `select_for_update` because multiple parse tasks can + referrence the same ReparseMeta object that is being queried below. `select_for_update` provides a DB lock on + the object and forces other transactions on the object to wait until this one completes. 
+ """ + if reparse_meta_models.exists(): + with transaction.atomic(): + try: + meta_model = reparse_meta_models.select_for_update().latest("pk") + meta_model.files_failed += 1 + if ReparseMeta.assert_all_files_done(meta_model): + ReparseMeta.set_reparse_finished(meta_model) + meta_model.save() + except DatabaseError: + logger.exception("Encountered exception while trying to update the `files_failed` field on the " + f"ReparseMeta object with ID: {meta_model.pk}.") + + @staticmethod + def increment_records_created(reparse_meta_models, num_created): + """ + Increment the count of records created for the datafile's current/latest reparse meta model. + + Because this function can be called in parallel we use `select_for_update` because multiple parse tasks can + referrence the same ReparseMeta object that is being queried below. `select_for_update` provides a DB lock on + the object and forces other transactions on the object to wait until this one completes. + """ + if reparse_meta_models.exists(): + with transaction.atomic(): + try: + meta_model = reparse_meta_models.select_for_update().latest("pk") + meta_model.num_records_created += num_created + meta_model.save() + except DatabaseError: + logger.exception("Encountered exception while trying to update the `files_failed` field on the " + f"ReparseMeta object with ID: {meta_model.pk}.") + + @staticmethod + def get_latest(): + """Get the ReparseMeta model with the greatest pk.""" + max_pk = ReparseMeta.objects.all().aggregate(Max('pk')) + if max_pk.get("pk__max", None) is None: + return None + return ReparseMeta.objects.get(pk=max_pk["pk__max"]) diff --git a/tdrs-backend/tdpservice/search_indexes/templates/multiselectdropdownfilter.html b/tdrs-backend/tdpservice/search_indexes/templates/multiselectdropdownfilter.html new file mode 100644 index 000000000..c8a8e9c78 --- /dev/null +++ b/tdrs-backend/tdpservice/search_indexes/templates/multiselectdropdownfilter.html @@ -0,0 +1,48 @@ +{% load i18n admin_urls %} +

      {% blocktrans with filter_title=title %} By {{ filter_title }} {% endblocktrans %}

      + +
      + {% for choice in choices|slice:":1" %} + Show {{ choice.display }} + {% endfor %} + +
      + + diff --git a/tdrs-backend/tdpservice/search_indexes/util.py b/tdrs-backend/tdpservice/search_indexes/util.py new file mode 100644 index 000000000..a7e8e9e94 --- /dev/null +++ b/tdrs-backend/tdpservice/search_indexes/util.py @@ -0,0 +1,26 @@ +"""Utility functions and definitions for models and documents.""" +from tdpservice.search_indexes.documents import tanf, ssp, tribal + +DOCUMENTS = [ + tanf.TANF_T1DataSubmissionDocument, tanf.TANF_T2DataSubmissionDocument, + tanf.TANF_T3DataSubmissionDocument, tanf.TANF_T4DataSubmissionDocument, + tanf.TANF_T5DataSubmissionDocument, tanf.TANF_T6DataSubmissionDocument, + tanf.TANF_T7DataSubmissionDocument, + + ssp.SSP_M1DataSubmissionDocument, ssp.SSP_M2DataSubmissionDocument, ssp.SSP_M3DataSubmissionDocument, + ssp.SSP_M4DataSubmissionDocument, ssp.SSP_M5DataSubmissionDocument, ssp.SSP_M6DataSubmissionDocument, + ssp.SSP_M7DataSubmissionDocument, + + tribal.Tribal_TANF_T1DataSubmissionDocument, tribal.Tribal_TANF_T2DataSubmissionDocument, + tribal.Tribal_TANF_T3DataSubmissionDocument, tribal.Tribal_TANF_T4DataSubmissionDocument, + tribal.Tribal_TANF_T5DataSubmissionDocument, tribal.Tribal_TANF_T6DataSubmissionDocument, + tribal.Tribal_TANF_T7DataSubmissionDocument + ] + +def count_all_records(): + """Count total number of records in the database.""" + total_num_records = 0 + for doc in DOCUMENTS: + model = doc.Django.model + total_num_records += model.objects.all().count() + return total_num_records diff --git a/tdrs-backend/tdpservice/settings/common.py b/tdrs-backend/tdpservice/settings/common.py index 05542a561..7a7baad72 100644 --- a/tdrs-backend/tdpservice/settings/common.py +++ b/tdrs-backend/tdpservice/settings/common.py @@ -53,7 +53,6 @@ class Common(Configuration): "storages", "django_elasticsearch_dsl", "django_elasticsearch_dsl_drf", - "more_admin_filters", # Local apps "tdpservice.core.apps.CoreConfig", "tdpservice.users", @@ -162,7 +161,7 @@ class Common(Configuration): TEMPLATES = [ { "BACKEND": 
"django.template.backends.django.DjangoTemplates", - "DIRS": STATICFILES_DIRS, + "DIRS": [os.path.join(BASE_DIR, "templates")], "APP_DIRS": True, "OPTIONS": { "context_processors": [ @@ -271,9 +270,8 @@ class Common(Configuration): # Sessions SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies" SESSION_COOKIE_HTTPONLY = True - SESSION_TIMEOUT = 30 SESSION_EXPIRE_AT_BROWSER_CLOSE = True - SESSION_COOKIE_AGE = 30 * 60 # 30 minutes + SESSION_COOKIE_AGE = 15 * 60 # 15 minutes # The CSRF token Cookie holds no security benefits when confined to HttpOnly. # Setting this to false to allow the frontend to include it in the header # of API POST calls to prevent false negative authorization errors. @@ -532,3 +530,4 @@ class Common(Configuration): GENERATE_TRAILER_ERRORS = os.getenv("GENERATE_TRAILER_ERRORS", False) IGNORE_DUPLICATE_ERROR_PRECEDENCE = os.getenv("IGNORE_DUPLICATE_ERROR_PRECEDENCE", False) BULK_CREATE_BATCH_SIZE = os.getenv("BULK_CREATE_BATCH_SIZE", 10000) + MEDIAN_LINE_PARSE_TIME = os.getenv("MEDIAN_LINE_PARSE_TIME", 0.0005574226379394531) diff --git a/tdrs-backend/tdpservice/settings/local.py b/tdrs-backend/tdpservice/settings/local.py index 171608fe5..bffbddd66 100644 --- a/tdrs-backend/tdpservice/settings/local.py +++ b/tdrs-backend/tdpservice/settings/local.py @@ -1,5 +1,8 @@ """Define configuration settings for local environment.""" import os +import logging +import django + from distutils.util import strtobool from .common import Common @@ -43,3 +46,28 @@ class Local(Common): } REDIS_SERVER_LOCAL = bool(strtobool(os.getenv("REDIS_SERVER_LOCAL", "TRUE"))) + + if os.getenv("ENABLE_SENTRY", "no") == "yes": + # SENTRY + import sentry_sdk + from sentry_sdk.integrations.django import DjangoIntegration + from sentry_sdk.integrations.logging import LoggingIntegration + sentry_sdk.init( + dsn="http://43ebf8abe1434ec6aea2c7b92c465a0e@host.docker.internal:9001/2", + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for performance 
monitoring. + integrations=[ + DjangoIntegration( + transaction_style='url', + middleware_spans=True, + signals_spans=True, + signals_denylist=[ + django.db.models.signals.pre_init, + django.db.models.signals.post_init, + ], + cache_spans=False, + ), + LoggingIntegration(level=logging.DEBUG, event_level=logging.DEBUG) + ], + traces_sample_rate=1.0, + ) diff --git a/tdrs-backend/tdpservice/templates/error_pages/500.html b/tdrs-backend/tdpservice/templates/error_pages/500.html new file mode 100644 index 000000000..0060aa69f --- /dev/null +++ b/tdrs-backend/tdpservice/templates/error_pages/500.html @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + + Page not found - TANF Data Portal + + + + +
      + Skip to main content +
      U.S. flag

      An official website of the United States government

      +
      +
      + + +
      +
      +
      +
      +
      +
      +
      +
      +

      Error

      +

      We’re sorry, there was an error in the response.

      + +

      {{ error }}.

      +
      + + + Contact Us + +
      +
      +
      +
      +
      +
      +
      +
      + +
      + + + \ No newline at end of file diff --git a/tdrs-backend/tdpservice/users/api/login.py b/tdrs-backend/tdpservice/users/api/login.py index 338508148..5e511963b 100644 --- a/tdrs-backend/tdpservice/users/api/login.py +++ b/tdrs-backend/tdpservice/users/api/login.py @@ -199,40 +199,13 @@ def login_user(request, user, user_status): ) logger.info("%s: %s on %s", user_status, user.username, timezone.now) - def get(self, request, *args, **kwargs): - """Handle decoding auth token and authenticate user.""" - code = request.GET.get("code", None) - state = request.GET.get("state", None) - - if code is None: - logger.info("Redirecting call to main page. No code provided.") - return HttpResponseRedirect(settings.FRONTEND_BASE_URL) - - if state is None: - logger.info("Redirecting call to main page. No state provided.") - return HttpResponseRedirect(settings.FRONTEND_BASE_URL) - - token_endpoint_response = self.get_token_endpoint_response(code) - - if token_endpoint_response.status_code != 200: - return Response( - { - "error": ( - "Invalid Validation Code Or OpenID Connect Authenticator " - "Down!" 
- ) - }, - status=status.HTTP_400_BAD_REQUEST, - ) - - token_data = token_endpoint_response.json() - id_token = token_data.get("id_token") - + def _get_user_id_token(self, request, state, token_data): + """Get the user and id_token from the request.""" try: decoded_payload = self.validate_and_decode_payload(request, state, token_data) + id_token = token_data.get("id_token") user = self.handle_user(request, id_token, decoded_payload) return response_redirect(user, id_token) - except (InactiveUser, ExpiredToken) as e: logger.exception(e) return Response( @@ -276,6 +249,39 @@ def get(self, request, *args, **kwargs): status=status.HTTP_400_BAD_REQUEST, ) + def get(self, request, *args, **kwargs): + """Handle decoding auth token and authenticate user.""" + code = request.GET.get("code", None) + state = request.GET.get("state", None) + + if code is None or state is None: + logger.info(f"Redirecting call to main page. No {'code' if code is None else 'state'} provided.") + return HttpResponseRedirect(settings.FRONTEND_BASE_URL) + + try: + token_endpoint_response = self.get_token_endpoint_response(code) + except Exception as e: + logger.exception(e) + return Response( + { + "error": str(e) + }, + status=status.HTTP_503_SERVICE_UNAVAILABLE + ) + + if token_endpoint_response.status_code != 200: + return Response( + { + "error": ( + "Invalid Validation Code Or OpenID Connect Authenticator " + "Down!" 
+ ) + }, + status=status.HTTP_400_BAD_REQUEST, + ) + + token_data = token_endpoint_response.json() + return self._get_user_id_token(request, state, token_data) class TokenAuthorizationLoginDotGov(TokenAuthorizationOIDC): """Define methods for handling login request from login.gov.""" @@ -333,7 +339,11 @@ def decode_payload(self, token_data, options=None): id_token = token_data.get("id_token") access_token = token_data.get("access_token") - ams_configuration = LoginRedirectAMS.get_ams_configuration() + try: + ams_configuration = LoginRedirectAMS.get_ams_configuration() + except Exception as e: + logger.error(e) + raise Exception(e) certs_endpoint = ams_configuration["jwks_uri"] cert_str = generate_jwt_from_jwks(certs_endpoint) issuer = ams_configuration["issuer"] @@ -351,7 +361,11 @@ def decode_payload(self, token_data, options=None): def get_token_endpoint_response(self, code): """Build out the query string params and full URL path for token endpoint.""" # First fetch the token endpoint from AMS. - ams_configuration = LoginRedirectAMS.get_ams_configuration() + try: + ams_configuration = LoginRedirectAMS.get_ams_configuration() + except Exception as e: + logger.error(e) + raise Exception(e) options = { "client_id": settings.AMS_CLIENT_ID, "client_secret": settings.AMS_CLIENT_SECRET, @@ -374,7 +388,11 @@ def get_auth_options(self, access_token, sub): auth_options = {} # Fetch userinfo endpoint for AMS to authenticate against hhsid, or # other user claims. 
- ams_configuration = LoginRedirectAMS.get_ams_configuration() + try: + ams_configuration = LoginRedirectAMS.get_ams_configuration() + except Exception as e: + logger.error(e) + raise Exception(e) userinfo_response = requests.post(ams_configuration["userinfo_endpoint"], {"access_token": access_token}) user_info = userinfo_response.json() diff --git a/tdrs-backend/tdpservice/users/api/login_redirect_oidc.py b/tdrs-backend/tdpservice/users/api/login_redirect_oidc.py index 585c0ff47..2a63adc6d 100644 --- a/tdrs-backend/tdpservice/users/api/login_redirect_oidc.py +++ b/tdrs-backend/tdpservice/users/api/login_redirect_oidc.py @@ -4,11 +4,13 @@ import requests import secrets import time +from rest_framework import status from urllib.parse import quote_plus, urlencode from django.conf import settings -from django.http import HttpResponseRedirect +from django.http import HttpResponseRedirect, HttpResponse from django.views.generic.base import RedirectView +from django.template.loader import render_to_string logger = logging.getLogger(__name__) @@ -93,19 +95,32 @@ def get_ams_configuration(): Includes currently published URLs for authorization, token, etc. 
""" - r = requests.get(settings.AMS_CONFIGURATION_ENDPOINT) - data = r.json() - return data + r = requests.get(settings.AMS_CONFIGURATION_ENDPOINT, timeout=10) + if r.status_code != 200: + logger.error( + f"Failed to get AMS configuration: {r.status_code} - {r.text}" + ) + raise Exception(f"Failed to get AMS configuration: {r.status_code} - {r.text}") + else: + data = r.json() + return data def get(self, request, *args, **kwargs): """Handle login workflow based on request origin.""" # Create state and nonce to track requests state = secrets.token_hex(32) nonce = secrets.token_hex(32) - """Get request and manage login information with AMS OpenID.""" - configuration = self.get_ams_configuration() - + try: + configuration = self.get_ams_configuration() + except Exception as e: + logger.error(f"Failed to get AMS configuration: {e}") + rendered = render_to_string( + 'error_pages/500.html', + {'error': f"Failed to get AMS configuration: {e}", + 'frontend': settings.FRONTEND_BASE_URL}) + return HttpResponse(rendered, + status=status.HTTP_503_SERVICE_UNAVAILABLE,) auth_params = { "client_id": settings.AMS_CLIENT_ID, "nonce": nonce, diff --git a/tdrs-backend/tdpservice/users/api/middleware.py b/tdrs-backend/tdpservice/users/api/middleware.py index 5b82ae93f..7a8922384 100644 --- a/tdrs-backend/tdpservice/users/api/middleware.py +++ b/tdrs-backend/tdpservice/users/api/middleware.py @@ -13,7 +13,7 @@ def __call__(self, request): """Update cookie.""" response = self.get_response(request) now = datetime.datetime.now() - timeout = now + datetime.timedelta(minutes=settings.SESSION_TIMEOUT) + timeout = now + datetime.timedelta(minutes=settings.SESSION_COOKIE_AGE) # if there is no user, the user is currently # in the authentication process so we can't diff --git a/tdrs-backend/tdpservice/users/api/utils.py b/tdrs-backend/tdpservice/users/api/utils.py index 910646e05..5f6e348c6 100644 --- a/tdrs-backend/tdpservice/users/api/utils.py +++ b/tdrs-backend/tdpservice/users/api/utils.py 
@@ -14,14 +14,12 @@ import jwt import requests from jwcrypto import jwk -from rest_framework import status -from rest_framework.response import Response from django.conf import settings logger = logging.getLogger(__name__) now = datetime.datetime.now() -timeout = now + datetime.timedelta(minutes=settings.SESSION_TIMEOUT) +timeout = now + datetime.timedelta(minutes=settings.SESSION_COOKIE_AGE) """ Validate the nonce and state returned by login.gov API calls match those @@ -149,35 +147,6 @@ def get_nonce_and_state(session): return validation_keys -""" -Returns a found users information along with an httpOnly cookie. - -:param self: parameter to permit django python to call a method within its own class -:param user: current user associated with this session -:param status_message: Helper message to note how the user was found -:param id_token: encoded token returned by login.gov/token -""" - - -def response_internal(user, status_message, id_token): - """Respond with an httpOnly cookie to secure the session with the client.""" - response = Response( - {"user_id": user.pk, "email": user.username, "status": status_message}, - status=status.HTTP_200_OK, - ) - response.set_cookie( - "id_token", - value=id_token, - max_age=None, - expires=timeout, - path="/", - domain=None, - secure=True, - httponly=True, - ) - return response - - def response_redirect(self, id_token): """ Redirects to web app with an httpOnly cookie. 
diff --git a/tdrs-backend/tdpservice/users/test/test_api/test_login.py b/tdrs-backend/tdpservice/users/test/test_api/test_login.py new file mode 100644 index 000000000..a1e7a9a41 --- /dev/null +++ b/tdrs-backend/tdpservice/users/test/test_api/test_login.py @@ -0,0 +1,46 @@ +"""Test the LoginRedirectAMS class.""" +import pytest + +from unittest import mock +from tdpservice.users.api.login_redirect_oidc import LoginRedirectAMS + +@mock.patch("requests.get") +def test_get_ams_configuration(requests_get_mock): + """Test get_ams_configuration returns the parsed config and raises on a non-200 response.""" + requests_get_mock.return_value.status_code = 200 + requests_get_mock.return_value.json.return_value = {"key": "test"} + returned_value = LoginRedirectAMS.get_ams_configuration() + assert returned_value == {'key': 'test'} + + # Test if the configuration is not returned + requests_get_mock.return_value.status_code = 503 + with pytest.raises(Exception): + LoginRedirectAMS.get_ams_configuration() + + +@mock.patch("requests.get") +@mock.patch("secrets.token_hex") +def test_LoginRedirectAMS_get(secrets_token_hex_mock, requests_get_mock): + """Test LoginRedirectAMS.get builds the redirect URL and returns 503 when AMS is down.""" + class DummyRequest: + session = { + "state_nonce_tracker": "dummy_state_nonce_tracker" + } + + requests_get_mock.return_value.status_code = 200 + requests_get_mock.return_value.json.return_value = {"authorization_endpoint": "dummy_authorization_endpoint"} + + secrets_token_hex_mock.return_value = "dummy_state_nonce" + + login_redirect_ams = LoginRedirectAMS() + + response = login_redirect_ams.get(DummyRequest) + assert response.url is not None + assert "dummy_state_nonce" in response.url + assert "dummy_authorization_endpoint" in response.url + + # Test if the AMS server is down + requests_get_mock.return_value.status_code = 503 + login_redirect_ams = LoginRedirectAMS() + response = login_redirect_ams.get("request") + assert response.status_code == 503 diff --git a/tdrs-backend/tdpservice/users/test/test_auth.py 
b/tdrs-backend/tdpservice/users/test/test_auth.py index 2ace23305..98ed19487 100644 --- a/tdrs-backend/tdpservice/users/test/test_auth.py +++ b/tdrs-backend/tdpservice/users/test/test_auth.py @@ -8,6 +8,7 @@ from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from rest_framework import status from rest_framework.test import APIRequestFactory +from unittest import mock import jwt import pytest @@ -18,7 +19,6 @@ generate_client_assertion, generate_jwt_from_jwks, generate_token_endpoint_parameters, - response_internal, ) from tdpservice.users.authentication import CustomAuthentication from tdpservice.users.models import User @@ -278,6 +278,27 @@ user_by_id = CustomAuthentication.authenticate(username=user.username, hhs_id=self.test_hhs_id) assert str(user_by_id.hhs_id) == self.test_hhs_id + @mock.patch("requests.get") + def test_bad_AMS_configuration( + self, + ams_states_factory, + req_factory, + user + ): + """Test that a failed AMS configuration fetch returns a 503 response.""" + request = req_factory + request = create_session(request, ams_states_factory) + user.hhs_id = self.test_hhs_id + # test new hash + user.login_gov_uuid = None + user.save() + + view = TokenAuthorizationAMS.as_view() + response = view(request) + assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE + assert b'Failed to get AMS configuration' in response.render().content + + def test_login_gov_redirect(api_client): """Test login.gov login url redirects.""" response = api_client.get("/v1/login/dotgov") @@ -428,15 +449,6 @@ def test_login_fails_with_bad_data(api_client): assert response.status_code == status.HTTP_400_BAD_REQUEST -@pytest.mark.django_db -def test_response_internal(user): - """Test response internal works.""" - response = response_internal( - user, status_message="hello", id_token={"fake": "stuff"} - ) - assert response.status_code == status.HTTP_200_OK - - @pytest.mark.django_db def test_generate_jwt_from_jwks(mocker): """Test JWT 
generation.""" diff --git a/tdrs-backend/tdpservice/users/test/test_permissions.py b/tdrs-backend/tdpservice/users/test/test_permissions.py index 0a4772fc8..608305131 100644 --- a/tdrs-backend/tdpservice/users/test/test_permissions.py +++ b/tdrs-backend/tdpservice/users/test/test_permissions.py @@ -156,6 +156,9 @@ def test_ofa_system_admin_permissions(ofa_system_admin): 'search_indexes.add_tribal_tanf_t7', 'search_indexes.view_tribal_tanf_t7', 'search_indexes.change_tribal_tanf_t7', + 'search_indexes.add_reparsemeta', + 'search_indexes.view_reparsemeta', + 'search_indexes.change_reparsemeta', } group_permissions = ofa_system_admin.get_group_permissions() assert group_permissions == expected_permissions diff --git a/tdrs-frontend/src/components/Header/Header.jsx b/tdrs-frontend/src/components/Header/Header.jsx index 201cd55bf..bcb614267 100644 --- a/tdrs-frontend/src/components/Header/Header.jsx +++ b/tdrs-frontend/src/components/Header/Header.jsx @@ -90,7 +90,7 @@ function Header() {