Skip to content

Commit

Permalink
Merge branch 'dev' into feat_1147_interoception
Browse files Browse the repository at this point in the history
  • Loading branch information
saskiad authored Jan 5, 2025
2 parents f481db5 + 13a9c9a commit 4dec699
Show file tree
Hide file tree
Showing 7 changed files with 58 additions and 13 deletions.
11 changes: 7 additions & 4 deletions examples/quality_control.json
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,8 @@
"tags": null,
"notes": "",
"allow_failed_metrics": false,
"latest_status": "Pending"
"latest_status": "Pending",
"created": "2022-11-22T00:00:00Z"
},
{
"modality": {
Expand Down Expand Up @@ -140,9 +141,10 @@
}
],
"tags": null,
"notes": null,
"notes": "Pass when video_1_num_frames==video_2_num_frames",
"allow_failed_metrics": false,
"latest_status": "Pass"
"latest_status": "Pass",
"created": "2022-11-22T00:00:00Z"
},
{
"modality": {
Expand Down Expand Up @@ -199,7 +201,8 @@
"tags": null,
"notes": null,
"allow_failed_metrics": false,
"latest_status": "Pass"
"latest_status": "Pass",
"created": "2022-11-22T00:00:00Z"
}
],
"notes": null
Expand Down
4 changes: 4 additions & 0 deletions examples/quality_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@
QCMetric(name="Probe C drift", value="Low", reference="ecephys-drift-map", status_history=[s]),
],
notes="",
created=t,
)

eval1 = QCEvaluation(
Expand All @@ -85,6 +86,8 @@
status_history=[s],
),
],
notes="Pass when video_1_num_frames==video_2_num_frames",
created=t,
)

eval2 = QCEvaluation(
Expand All @@ -97,6 +100,7 @@
QCMetric(name="ProbeB", value=True, status_history=[s]),
QCMetric(name="ProbeC", value=True, status_history=[s]),
],
created=t,
)

q = QualityControl(evaluations=[eval0, eval1, eval2])
Expand Down
8 changes: 8 additions & 0 deletions src/aind_data_schema/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import json
import re
import logging
from pathlib import Path
from typing import Any, Generic, Optional, TypeVar, get_args

Expand All @@ -23,6 +24,9 @@
from aind_data_schema_models.brain_atlas import CCFStructure


MAX_FILE_SIZE = 500 * 1024 # 500KB


def _coerce_naive_datetime(v: Any, handler: ValidatorFunctionWrapHandler) -> AwareDatetime:
"""Validator to wrap around AwareDatetime to set a default timezone as user's locale"""
try:
Expand Down Expand Up @@ -178,3 +182,7 @@ def write_standard_file(

with open(filename, "w") as f:
f.write(self.model_dump_json(indent=3))

# Check that size doesn't exceed the maximum
if len(self.model_dump_json(indent=3)) > MAX_FILE_SIZE:
        logging.warning(f"File size exceeds {MAX_FILE_SIZE / 1024} KB: {filename}")
1 change: 1 addition & 0 deletions src/aind_data_schema/components/devices.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,6 +250,7 @@ class LickSensorType(str, Enum):
"""Type of lick sensor"""

CAPACITIVE = "Capacitive"
CONDUCTIVE = "Conductive"
PIEZOELECTIC = "Piezoelectric"


Expand Down
13 changes: 11 additions & 2 deletions src/aind_data_schema/core/quality_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,9 @@ class QCEvaluation(AindModel):
),
)
latest_status: Status = Field(default=None, title="Evaluation status")
created: AwareDatetimeWithDefault = Field(
default_factory=lambda: datetime.now(tz=timezone.utc), title="Evaluation creation date"
)

def status(self, date: datetime = datetime.now(tz=timezone.utc)) -> Status:
"""DEPRECATED
Expand Down Expand Up @@ -132,7 +135,7 @@ def compute_latest_status(self):
self.latest_status = self.evaluate_status()
return self

def evaluate_status(self, date: datetime = datetime.now(tz=timezone.utc)) -> Status:
def evaluate_status(self, date: Optional[datetime] = None) -> Status:
"""Loop through all metrics and return the evaluation's status
Any fail -> FAIL
Expand All @@ -144,6 +147,9 @@ def evaluate_status(self, date: datetime = datetime.now(tz=timezone.utc)) -> Sta
Status
Current status of the evaluation
"""
if not date:
date = datetime.now(tz=timezone.utc)

latest_metric_statuses = []

for metric in self.metrics:
Expand Down Expand Up @@ -202,14 +208,17 @@ def status(
modality: Union[Modality.ONE_OF, List[Modality.ONE_OF], None] = None,
stage: Union[Stage, List[Stage], None] = None,
tag: Union[str, List[str], None] = None,
date: datetime = datetime.now(tz=timezone.utc),
date: Optional[datetime] = None,
) -> Status:
"""Loop through all evaluations and return the overall status
Any FAIL -> FAIL
If no fails, then any PENDING -> PENDING
All PASS -> PASS
"""
if not date:
date = datetime.now(tz=timezone.utc)

if not modality and not stage and not tag:
eval_statuses = [evaluation.evaluate_status(date=date) for evaluation in self.evaluations]
else:
Expand Down
23 changes: 22 additions & 1 deletion tests/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,14 @@
from pydantic import ValidationError, create_model, SkipValidation
from typing import Literal

from aind_data_schema.base import AindGeneric, AwareDatetimeWithDefault, is_dict_corrupt, AindModel, AindCoreModel
from aind_data_schema.base import (
AindGeneric,
AwareDatetimeWithDefault,
is_dict_corrupt,
AindModel,
AindCoreModel,
MAX_FILE_SIZE,
)
from aind_data_schema.core.subject import Subject
from aind_data_schema_models.brain_atlas import CCFStructure

Expand Down Expand Up @@ -151,6 +158,20 @@ class Modelv2(AindCoreModel):
# this is to ensure you can't get a bumped schema_version without passing validation
self.assertRaises(ValidationError, lambda: Modelv1(**v2_from_v1.model_dump()))

@patch("builtins.open", new_callable=mock_open)
@patch("logging.warning")
def test_write_standard_file_size_warning(self, mock_logging_warning: MagicMock, mock_open: MagicMock):
"""Tests that a warning is logged if the file size exceeds MAX_FILE_SIZE"""

s = Subject.model_construct()
s.subject_id = "s" * (MAX_FILE_SIZE + 1000)
s.write_standard_file(output_directory=Path("dir"), suffix=".foo.bar")

mock_open.assert_has_calls([call(Path("dir/subject.foo.bar"), "w")])
mock_logging_warning.assert_called_once_with(
f"File size exceeds {MAX_FILE_SIZE / 1024} KB: dir/subject.foo.bar"
)


if __name__ == "__main__":
unittest.main()
11 changes: 5 additions & 6 deletions tests/test_quality_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def test_constructors(self):
stage=Stage.PROCESSING,
metrics=[
QCMetric(
name="Multiple values example",
name="Dict example",
value={"stuff": "in_a_dict"},
status_history=[
QCStatus(evaluator="Bob", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS)
Expand Down Expand Up @@ -56,7 +56,7 @@ def test_overall_status(self):
stage=Stage.PROCESSING,
metrics=[
QCMetric(
name="Multiple values example",
name="Dict example",
value={"stuff": "in_a_dict"},
status_history=[
QCStatus(evaluator="Bob", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS)
Expand Down Expand Up @@ -298,7 +298,7 @@ def test_multi_session(self):
stage=Stage.PROCESSING,
metrics=[
QCMetric(
name="Multiple values example",
name="Dict example",
value={"stuff": "in_a_dict"},
status_history=[
QCStatus(evaluator="Automated", timestamp=t0, status=Status.PASS),
Expand All @@ -318,7 +318,7 @@ def test_multi_session(self):
stage=Stage.PROCESSING,
metrics=[
QCMetric(
name="Multiple values example",
name="Dict with evaluated assets list",
value={"stuff": "in_a_dict"},
status_history=[
QCStatus(evaluator="Automated", timestamp=t0, status=Status.PASS),
Expand All @@ -328,7 +328,6 @@ def test_multi_session(self):
],
)

print(context.exception)
self.assertTrue(
"is in a single-asset QCEvaluation and should not have evaluated_assets" in repr(context.exception)
)
Expand All @@ -341,7 +340,7 @@ def test_multi_session(self):
stage=Stage.MULTI_ASSET,
metrics=[
QCMetric(
name="Multiple values example",
name="Missing evaluated assets",
value={"stuff": "in_a_dict"},
status_history=[
QCStatus(evaluator="Automated", timestamp=t0, status=Status.PASS),
Expand Down

0 comments on commit 4dec699

Please sign in to comment.