Trapi validation #29

Merged · 3 commits · Aug 29, 2024
26 changes: 12 additions & 14 deletions .github/workflows/dev.yml
@@ -9,24 +9,22 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
-      # Cache docker layers for faster build
-      - uses: satackey/[email protected]
-        # Ignore the failure of a step and avoid terminating the job.
-        continue-on-error: true
+      - uses: actions/setup-python@v5
+        name: Setup python
+        with:
+          python-version: '3.11'
+          cache: 'pip'
 
-      - name: Build
-        run: docker build -t harness-testing -f Dockerfile.test .
+      - name: Install requirements
+        run: pip install -r requirements.txt
 
+      - run: pip install -r requirements-runners.txt
+      - run: pip install -r requirements-test.txt
+
       - name: Run tests and get output
-        run: |
-          echo 'TEST_OUTPUT<<EOF' >> $GITHUB_ENV
-          echo "$(docker run harness-testing)" >> $GITHUB_ENV
-          echo 'EOF' >> $GITHUB_ENV
-
-      - name: Exit if there are any test failures
-        run: '[[ $TEST_OUTPUT != *FAILED* ]]'
+        run: pytest
 
   check-format:
     name: Check that code matches Black formatter
1 change: 1 addition & 0 deletions requirements-test.txt
@@ -1,3 +1,4 @@
 pytest==7.4.2
 pytest-asyncio==0.23.3
 pytest-httpx==0.30.0
+pytest-mock==3.11.1
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,6 +1,7 @@
 httpx==0.27.0
 pydantic==2.7.1
+# reasoner_pydantic==4.1.6
 setproctitle==1.3.3
 slack_sdk==3.27.2
 tqdm==4.66.4
 translator-testing-model==0.3.2
10 changes: 5 additions & 5 deletions tests/example_tests.py → tests/helpers/example_tests.py
@@ -34,7 +34,7 @@
             "tags": [],
             "input_id": "MONDO:0010794",
             "input_name": "NARP Syndrome",
-            "input_category": None,
+            "input_category": "biolink:Disease",
             "predicate_id": "biolink:treats",
             "predicate_name": "treats",
             "output_id": "DRUGBANK:DB00313",
@@ -48,7 +48,7 @@
             "in_v1": None,
             "well_known": False,
             "test_reference": None,
-            "runner_settings": ["inferred"],
+            "test_runner_settings": ["inferred"],
             "test_metadata": {
                 "id": "1",
                 "name": None,
@@ -67,7 +67,7 @@
             "tags": [],
             "input_id": "MONDO:0010794",
             "input_name": "NARP Syndrome",
-            "input_category": None,
+            "input_category": "biolink:Disease",
             "predicate_id": "biolink:treats",
             "predicate_name": "treats",
             "output_id": "MESH:D001463",
@@ -81,7 +81,7 @@
             "in_v1": None,
             "well_known": False,
             "test_reference": None,
-            "runner_settings": ["inferred"],
+            "test_runner_settings": ["inferred"],
             "test_metadata": {
                 "id": "1",
                 "name": None,
@@ -102,7 +102,7 @@
             "test_case_predicate_name": "treats",
             "test_case_predicate_id": "biolink:treats",
             "test_case_input_id": "MONDO:0010794",
-            "test_case_runner_settings": ["inferred"],
+            "test_runner_settings": ["inferred"],
         }
     },
 }
61 changes: 61 additions & 0 deletions tests/helpers/logger.py
@@ -0,0 +1,61 @@
"""Logging setup."""

import logging


class ColoredFormatter(logging.Formatter):
"""Colored formatter."""

prefix = "[%(asctime)s: %(levelname)s/%(name)s]:"
default = f"{prefix} %(message)s"
error_fmt = f"\x1b[31m{prefix}\x1b[0m %(message)s"
warning_fmt = f"\x1b[33m{prefix}\x1b[0m %(message)s"
info_fmt = f"\x1b[32m{prefix}\x1b[0m %(message)s"
debug_fmt = f"\x1b[34m{prefix}\x1b[0m %(message)s"

def __init__(self, fmt=default):
"""Initialize."""
logging.Formatter.__init__(self, fmt)

def format(self, record):
"""Format record."""
format_orig = self._style._fmt
if record.levelno == logging.DEBUG:
self._style._fmt = ColoredFormatter.debug_fmt
elif record.levelno == logging.INFO:
self._style._fmt = ColoredFormatter.info_fmt
elif record.levelno == logging.WARNING:
self._style._fmt = ColoredFormatter.warning_fmt
elif record.levelno == logging.ERROR:
self._style._fmt = ColoredFormatter.error_fmt
result = logging.Formatter.format(self, record)
self._style._fmt = format_orig
return result


def setup_logger():
"""Set up Test Harness logger."""
logger = logging.getLogger("harness")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(ColoredFormatter())
logger.addHandler(handler)

return logger


def assert_no_level(logger, allowed_level, exceptions=0):
"""
Check that the logger has no records greater than
the allowed level.

Also has a parameter to specify the number of exceptions
to the rule (number of records that will be ignored).
"""
total = 0
for record in logger.records:
if record.levelno >= allowed_level:
total += 1
if total > exceptions:
raise Exception(f"Invalid Log Record: {record}")
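
A minimal sketch of how these helpers might be used from a test, assuming pytest's caplog fixture and this import path (neither is part of the PR):

import logging

from tests.helpers.logger import assert_no_level, setup_logger


def test_logging_stays_below_error(caplog):
    # setup_logger() returns the "harness" logger with the colored handler attached
    logger = setup_logger()
    with caplog.at_level(logging.DEBUG, logger="harness"):
        logger.info("starting run")
        logger.warning("retrying query once")
    # caplog exposes .records, which is all assert_no_level needs;
    # this raises only if something at ERROR or above was captured
    assert_no_level(caplog, logging.ERROR)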
80 changes: 80 additions & 0 deletions tests/helpers/mock_responses.py
@@ -0,0 +1,80 @@
"""Mock Test Responses."""

kp_response = {
"message": {
"query_graph": {
"nodes": {
"n0": {"ids": ["MESH:D008687"]},
"n1": {"categories": ["biolink:Disease"]},
},
"edges": {
"n0n1": {
"subject": "n0",
"object": "n1",
"predicates": ["biolink:treats"],
}
},
},
"knowledge_graph": {
"nodes": {
"MESH:D008687": {
"categories": ["biolink:SmallMolecule"],
"name": "Metformin",
"attributes": [],
},
"MONDO:0005148": {
"categories": [
"biolink:Disease",
],
"name": "type 2 diabetes mellitus",
"attributes": [],
},
},
"edges": {
"n0n1": {
"subject": "MESH:D008687",
"object": "MONDO:0005148",
"predicate": "biolink:treats",
"sources": [
{
"resource_id": "infores:kp0",
"resource_role": "primary_knowledge_source",
}
],
"attributes": [],
},
},
},
"results": [
{
"node_bindings": {
"n0": [
{
"id": "MESH:D008687",
"attributes": [],
},
],
"n1": [
{
"id": "MONDO:0005148",
"attributes": [],
},
],
},
"analyses": [
{
"resource_id": "kp0",
"edge_bindings": {
"n0n1": [
{
"id": "n0n1",
"attributes": [],
},
],
},
}
],
},
],
},
}
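
A short sketch of one way kp_response could be served to code under test with the pytest-httpx fixture already pinned in requirements-test.txt; the endpoint URL and query payload here are hypothetical, not taken from this PR:

import httpx

from tests.helpers.mock_responses import kp_response


def test_kp_returns_canned_trapi(httpx_mock):
    # any httpx request to this (hypothetical) URL now returns the mock TRAPI body
    httpx_mock.add_response(url="http://tester/query", json=kp_response)

    response = httpx.post("http://tester/query", json={"message": {"query_graph": {}}})
    message = response.json()["message"]
    # the canned response binds Metformin (MESH:D008687) to type 2 diabetes (MONDO:0005148)
    assert message["results"][0]["node_bindings"]["n1"][0]["id"] == "MONDO:0005148"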
69 changes: 69 additions & 0 deletions tests/helpers/mocks.py
@@ -0,0 +1,69 @@
from test_harness.reporter import Reporter
from test_harness.slacker import Slacker
from test_harness.runner.query_runner import QueryRunner


class MockReporter(Reporter):
    def __init__(self, base_url=None, refresh_token=None, logger=None):
        super().__init__()
        self.base_path = base_url
        self.test_run_id = 1
        pass

    async def get_auth(self):
        pass

    async def create_test_run(self, test_env, suite_name):
        return 1

    async def create_test(self, test, asset):
        return 2

    async def upload_labels(self, test_id, labels):
        pass

    async def upload_logs(self, test_id, logs):
        pass

    async def upload_artifact_references(self, test_id, artifact_references):
        pass

    async def upload_screenshots(self, test_id, screenshot):
        pass

    async def upload_log(self, test_id, message):
        pass

    async def finish_test(self, test_id, result):
        return result

    async def finish_test_run(self):
        pass


class MockSlacker(Slacker):
    def __init__(self):
        pass

    async def post_notification(self, messages=[]):
        print(f"posting messages: {messages}")
        pass

    async def upload_test_results_file(self, filename, extension, results):
        pass


class MockQueryRunner(QueryRunner):
    async def retrieve_registry(self, trapi_version: str):
        self.registry = {
            "staging": {
                "ars": [
                    {
                        "_id": "testing",
                        "title": "Tester",
                        "infores": "infores:tester",
                        "url": "http://tester",
                    }
                ],
            },
        }
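
A hedged sketch of exercising MockQueryRunner's stub registry directly; the constructor arguments are an assumption, since QueryRunner's real signature is not shown in this diff:

import pytest

from tests.helpers.mocks import MockQueryRunner


@pytest.mark.asyncio
async def test_mock_registry_skips_smartapi():
    # constructor args are assumed; adjust to match QueryRunner's real signature
    runner = MockQueryRunner(logger=None)
    await runner.retrieve_registry(trapi_version="1.5")
    # the stub registry points the staging environment at the fake "tester" endpoint
    assert runner.registry["staging"]["ars"][0]["infores"] == "infores:tester"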
44 changes: 0 additions & 44 deletions tests/mocker.py

This file was deleted.

9 changes: 4 additions & 5 deletions tests/test_main.py
@@ -1,9 +1,9 @@
 import pytest
 
 from test_harness.main import main
-from .example_tests import example_test_cases
 
-from .mocker import (
+from .helpers.example_tests import example_test_cases
+from .helpers.mocks import (
     MockReporter,
     MockSlacker,
 )
@@ -13,17 +13,16 @@
 async def test_main(mocker):
     """Test the main function."""
     # This article is awesome: https://nedbatchelder.com/blog/201908/why_your_mock_doesnt_work.html
-    run_ars_test = mocker.patch("test_harness.run.run_ars_test", return_value="Fail")
+    run_tests = mocker.patch("test_harness.main.run_tests", return_value={})
-    mocker.patch("test_harness.slacker.Slacker", return_value=MockSlacker())
     mocker.patch("test_harness.main.Slacker", return_value=MockSlacker())
     mocker.patch("test_harness.main.Reporter", return_value=MockReporter())
     await main(
         {
             "tests": example_test_cases,
             "suite": "testing",
             "save_to_dashboard": False,
             "json_output": False,
             "log_level": "ERROR",
         }
     )
-    # run_ui_test.assert_called_once()
+    run_tests.assert_called_once()