24.07.25 #339

Merged (4 commits, Jul 25, 2024)
88 changes: 86 additions & 2 deletions biocompute/apis.py
@@ -25,6 +25,7 @@
bulk_response_constructor,
response_status,
)
from deepdiff import DeepDiff
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from django.conf import settings
@@ -34,7 +35,7 @@
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from tests.fixtures.testing_bcos import BCO_000001_DRAFT
from tests.fixtures.testing_bcos import BCO_000001_DRAFT, BCO_000000_DRAFT

hostname = settings.PUBLIC_HOSTNAME
BASE_DIR = settings.BASE_DIR
@@ -198,7 +199,7 @@ class PublishBcoApi(APIView):
"""

permission_classes = [IsAuthenticated]
# swagger_schema = None
swagger_schema = None
#TODO: Add Swagger docs
# schema = jsonref.load_uri(
# f"file://{BASE_DIR}/config/IEEE/2791object.json"
@@ -860,3 +861,86 @@ def get(self, request, bco_accession, bco_version):

bco_counter_increment(bco_instance)
return Response(status=status.HTTP_200_OK, data=bco_instance.contents)

class CompareBcoApi(APIView):
"""Bulk Compare BCOs [Bulk Enabled]

--------------------

Bulk operation to compare BCOs.

```JSON
[
{...BCO CONTENTS...},
{...BCO CONTENTS...}
]
```

"""

authentication_classes = []
permission_classes = [AllowAny]

@swagger_auto_schema(
operation_id="api_bco_compare",
request_body=openapi.Schema(
type=openapi.TYPE_ARRAY,
title="Bulk Compare BCOs",
items=openapi.Schema(
type=openapi.TYPE_ARRAY,
example=[BCO_000000_DRAFT, BCO_000001_DRAFT],
items=openapi.Schema(
type=openapi.TYPE_OBJECT,
required=["contents"],
description="Contents of the BCO.",
)
),
description="Compare one BCO against another.",
),
responses={
200: "All BCO comparisons are successful.",
207: "Some or all BCO comparisons failed. Each object submitted"
" will have it's own response object with it's own status"
" message:\n",
400: "Bad request."
},
tags=["BCO Management"],
)
def post(self, request):
validator = BcoValidator()
response_data = []
rejected_requests = False
accepted_requests = True
data = request.data

for index, comparison in enumerate(data):
new_bco, old_bco = comparison
identifier = new_bco["object_id"] + " vs " + old_bco["object_id"]

# new_results = validator.parse_and_validate(bco=new_bco)
# old_results = validator.parse_and_validate(bco=old_bco)
# import pdb; pdb.set_trace()
# new_identifier, new_results = new_results.popitem()
# old_identifier, old_results = bco_results.popitem()

# if results["number_of_errors"] > 0:
# rejected_requests = True
# bco_status = "FAILED"
# status_code = 400
# message = "BCO not valid"

# else:
# accepted_requests = True
# bco_status = "SUCCESS"
# status_code = 200
# message = "BCO valid"

response_data.append(bulk_response_constructor(
identifier=identifier,
status="SUCCESS",
code=200,
# message=message,
data=DeepDiff(new_bco, old_bco).to_json()
))

status_code = response_status(accepted_requests, rejected_requests)
return Response(status=status_code, data=response_data)
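The comparison itself is delegated to DeepDiff, so each entry in the response carries a serialized diff. As a quick illustration of what `DeepDiff(new_bco, old_bco).to_json()` produces, a minimal sketch with made-up BCO contents:

```python
from deepdiff import DeepDiff

new_bco = {
    "object_id": "BCO_000000/DRAFT",
    "usability_domain": ["Aligns and sorts reads to a reference genome."],
}
old_bco = {
    "object_id": "BCO_000001/DRAFT",
    "usability_domain": ["Aligns reads to a reference genome."],
}

# Same call the view makes. Note the argument order: new_bco is passed as
# the baseline (t1) and old_bco as the target (t2), so "new_value" in the
# output refers to old_bco's values.
print(DeepDiff(new_bco, old_bco).to_json())
# {"values_changed": {"root['object_id']": {"new_value": "BCO_000001/DRAFT",
#  "old_value": "BCO_000000/DRAFT"}, "root['usability_domain'][0]": {...}}}
```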
28 changes: 25 additions & 3 deletions biocompute/services.py
@@ -608,14 +608,36 @@ def bco_score(bco_instance: Bco) -> Bco:
if "usability_domain" not in contents:
bco_instance.score = 0
return bco_instance

try:
usability_domain_length = sum(len(s) for s in contents['usability_domain'])
score = {"usability_domain_length": usability_domain_length}
except TypeError:
score = {"usability_domain_length": 0}
usability_domain_length = 0

bco_instance.score = usability_domain_length

# Calculate the base score
base_score = usability_domain_length

# Apply the field length modifier
field_length_modifier = 1.2
base_score *= field_length_modifier

# Check for the existence of the error domain
error_domain_exists = "error_domain" in contents
if error_domain_exists:
base_score += 5

# Apply the parametric object multiplier
parametric_object_count = len(contents.get('parametric_objects', []))
parametric_object_multiplier = 1.1
base_score *= (parametric_object_multiplier ** parametric_object_count)

# Add score for each reviewer object (up to 5)
reviewer_object_count = min(5, len(contents.get('reviewer_objects', [])))
base_score += reviewer_object_count

# Finalize the score
bco_instance.score = base_score

return bco_instance
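To make the reworked formula concrete, a worked example (illustrative contents, not a real BCO; results shown modulo float rounding):

```python
contents = {
    "usability_domain": ["a" * 60, "b" * 40],  # total length: 100
    "error_domain": {},                        # present: +5
    "parametric_objects": [{}, {}],            # two objects: x1.1 each
    "reviewer_objects": [{}, {}, {}],          # three objects: +3 (capped at 5)
}

score = sum(len(s) for s in contents["usability_domain"])  # 100
score *= 1.2                                               # 120.0  (field length modifier)
score += 5                                                 # 125.0  (error domain bonus)
score *= 1.1 ** len(contents["parametric_objects"])        # 151.25 (parametric multiplier)
score += min(5, len(contents["reviewer_objects"]))         # 154.25 (reviewer bonus)
```

Because the error-domain bonus is added before the parametric multiplication, it gets scaled by the multiplier as well; worth confirming that ordering is intended.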
2 changes: 2 additions & 0 deletions biocompute/urls.py
@@ -9,6 +9,7 @@
DraftsPublishApi,
PublishBcoApi,
ValidateBcoApi,
CompareBcoApi,
)

urlpatterns = [
@@ -17,4 +18,5 @@
path("objects/drafts/publish/", DraftsPublishApi.as_view()),
path("objects/validate/", ValidateBcoApi.as_view()),
path("objects/publish/", PublishBcoApi.as_view()),
path("objects/compare/", CompareBcoApi.as_view()),
]
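With the route wired in, the endpoint can be exercised end to end. A minimal sketch using `requests`; the host and the `api/` prefix are assumptions about the deployment, and the exact keys in each result depend on `bulk_response_constructor`:

```python
import requests

new_bco = {"object_id": "BCO_000000/DRAFT", "usability_domain": ["..."]}
old_bco = {"object_id": "BCO_000001/DRAFT", "usability_domain": ["..."]}

# The body is a list of comparisons; each comparison is a [new, old] pair,
# matching the `new_bco, old_bco = comparison` unpacking in the view.
payload = [[new_bco, old_bco]]

resp = requests.post(
    "http://localhost:8000/api/objects/compare/",  # host and prefix assumed
    json=payload,
)
print(resp.status_code)  # 200 when all comparisons succeed
for result in resp.json():
    print(result["identifier"])  # e.g. "BCO_000000/DRAFT vs BCO_000001/DRAFT"
```

Since the view sets `permission_classes = [AllowAny]` with empty `authentication_classes`, no token is required.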