Moments before disaster.

lwark
2025-01-16 08:36:15 -06:00
parent 5cded949ed
commit bf711369c6
21 changed files with 541 additions and 368 deletions

View File

@@ -1,3 +1,4 @@
+- [ ] Can my "to_dict", "to_sub_dict", "to_pydantic" methods be rewritten as properties?
 - [ ] Stop displacing date on Irida controls and just do what Turnaround time does.
 - [ ] Get Manager window working for KitType, maybe SubmissionType
 - [x] Find a way to merge AddEdit with ReagentAdder

View File

@@ -20,11 +20,11 @@ def set_sqlite_pragma(dbapi_connection, connection_record):
     cursor = dbapi_connection.cursor()
     if ctx.database_schema == "sqlite":
         execution_phrase = "PRAGMA foreign_keys=ON"
-        print(f"Executing '{execution_phrase}' in sql.")
     else:
         # print("Nothing to execute, returning")
         cursor.close()
         return
+    print(f"Executing '{execution_phrase}' in sql.")
     cursor.execute(execution_phrase)
     cursor.close()
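For reference, the hunk above only moves the logging line; the enclosing function is a SQLAlchemy `connect` event listener. A minimal, self-contained sketch of that pattern is below; the engine URL and the decision to always enable the pragma are assumptions standing in for this project's `ctx.database_schema` check.

```python
from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///:memory:")

@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    # SQLite only enforces foreign keys when the pragma is switched on per connection.
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()
```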
@@ -33,6 +33,17 @@ from .models import *
 def update_log(mapper, connection, target):
+    """
+    Updates log table whenever an object with LogMixin is updated.
+
+    Args:
+        mapper ():
+        connection ():
+        target ():
+
+    Returns:
+        None
+    """
     state = inspect(target)
     object_name = state.object.truncated_name
     update = dict(user=getuser(), time=datetime.now(), object=object_name, changes=[])
@@ -43,6 +54,7 @@ def update_log(mapper, connection, target):
         if attr.key == "custom":
             continue
         added = [str(item) for item in hist.added]
+        # NOTE: Attributes left out to save space
         if attr.key in ['artic_technician', 'submission_sample_associations', 'submission_reagent_associations',
                         'submission_equipment_associations', 'submission_tips_associations', 'contact_id', 'gel_info',
                         'gel_controls', 'source_plates']:
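The listener above walks SQLAlchemy's per-attribute change history to build its log entry. Stripped of this repo's models, the mechanism looks roughly like this; the `Widget` class and in-memory engine are illustrative only.

```python
from sqlalchemy import Column, Integer, String, create_engine, inspect
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Widget(Base):
    __tablename__ = "widget"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with Session(engine) as session:
    widget = Widget(name="old")
    session.add(widget)
    session.commit()
    widget.name = "new"
    # inspect() exposes the pending change history before the flush.
    for attr in inspect(widget).attrs:
        hist = attr.history
        if hist.has_changes():
            print(attr.key, list(hist.deleted), list(hist.added))
```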

View File

@@ -175,7 +175,7 @@ class BaseClass(Base):
         try:
             records = [obj.to_sub_dict(**kwargs) for obj in objects]
         except AttributeError:
-            records = [obj.to_omnigui_dict() for obj in objects]
+            records = [obj.omnigui_dict for obj in objects]
         return DataFrame.from_records(records)

     @classmethod
@@ -241,7 +241,8 @@ class BaseClass(Base):
             report.add_result(Result(msg=e, status="Critical"))
         return report

-    def to_omnigui_dict(self) -> dict:
+    @property
+    def omnigui_dict(self) -> dict:
         """
         For getting any object in an omni-thing friendly output.
@@ -255,8 +256,8 @@ class BaseClass(Base):
             pass
         return dicto

-    @classmethod
-    def get_pydantic_model(cls) -> BaseModel:
+    @classproperty
+    def pydantic_model(cls) -> BaseModel:
         """
         Gets the pydantic model corresponding to this object.
@@ -271,7 +272,7 @@ class BaseClass(Base):
         return model

     @classproperty
-    def add_edit_tooltips(self) -> dict:
+    def add_edit_tooltips(cls) -> dict:
         """
         Gets tooltips for Omni-add-edit
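A number of hunks in this commit swap `@classmethod` getters for `@classproperty`, which is not a Python built-in; the project presumably supplies it from its `tools` module. A minimal read-only descriptor with the behaviour these call sites rely on (e.g. `BaseClass.pydantic_model` accessed without parentheses) would look roughly like this sketch:

```python
class classproperty:
    """Read-only property resolved against the class rather than an instance."""

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, instance, owner=None):
        # Works for both Klass.attr and instance.attr access.
        if owner is None:
            owner = type(instance)
        return self.fget(owner)


class Example:
    _registry = ["a", "b"]

    @classproperty
    def registry(cls):
        return list(cls._registry)


assert Example.registry == ["a", "b"]
```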

View File

@@ -81,7 +81,8 @@ class ControlType(BaseClass):
         subtypes = sorted(list(jsoner[genera].keys()), reverse=True)
         return subtypes

-    def get_instance_class(self) -> Control:
+    @property
+    def instance_class(self) -> Control:
         """
         Retrieves the Control class associated with this controltype
@@ -314,7 +315,7 @@ class PCRControl(Control):
     def to_sub_dict(self) -> dict:
         """
-        Creates dictionary of fields for this object
+        Creates dictionary of fields for this object.

         Returns:
             dict: Output dict of name, ct, subtype, target, reagent_lot and submitted_date
@@ -471,8 +472,8 @@ class IridaControl(Control):
                 _dict[key] = data[genus][key]
             yield _dict

-    @classmethod
-    def get_modes(cls) -> List[str]:
+    @classproperty
+    def modes(cls) -> List[str]:
         """
         Get all control modes from database

View File

@@ -9,7 +9,7 @@ from sqlalchemy.orm import relationship, validates, Query
 from sqlalchemy.ext.associationproxy import association_proxy
 from datetime import date, datetime, timedelta
 from tools import check_authorization, setup_lookup, Report, Result, check_regex_match, yaml_regex_creator, timezone
-from typing import List, Literal, Generator, Any
+from typing import List, Literal, Generator, Any, Tuple
 from pandas import ExcelFile
 from pathlib import Path
 from . import Base, BaseClass, Organization, LogMixin
@@ -157,30 +157,62 @@ class KitType(BaseClass):
         else:
             return (item.reagent_role for item in relevant_associations)

-    def construct_xl_map_for_use(self, submission_type: str | SubmissionType) -> Generator[(str, str), None, None]:
+    def construct_xl_map_for_use(self, submission_type: str | SubmissionType) -> Tuple[dict|None, KitType]:
         """
         Creates map of locations in Excel workbook for a SubmissionType

         Args:
+            new_kit ():
             submission_type (str | SubmissionType): Submissiontype.name

         Returns:
             Generator[(str, str), None, None]: Tuple containing information locations.
         """
+        new_kit = self
         # NOTE: Account for submission_type variable type.
         match submission_type:
             case str():
-                assocs = [item for item in self.kit_reagentrole_associations if
-                          item.submission_type.name == submission_type]
+                # assocs = [item for item in self.kit_reagentrole_associations if
+                #           item.submission_type.name == submission_type]
+                logger.debug(f"Query for {submission_type}")
+                submission_type = SubmissionType.query(name=submission_type)
             case SubmissionType():
-                assocs = [item for item in self.kit_reagentrole_associations if item.submission_type == submission_type]
+                pass
             case _:
                 raise ValueError(f"Wrong variable type: {type(submission_type)} used!")
-        for assoc in assocs:
-            try:
-                yield assoc.reagent_role.name, assoc.uses
-            except TypeError:
-                continue
+        logger.debug(f"Submission type: {submission_type}, Kit: {self}")
+        assocs = [item for item in self.kit_reagentrole_associations if item.submission_type == submission_type]
+        logger.debug(f"Associations: {assocs}")
+        # NOTE: rescue with submission type's default kit.
+        if not assocs:
+            logger.error(
+                f"No associations found with {self}. Attempting rescue with default kit: {submission_type.default_kit}")
+            new_kit = submission_type.default_kit
+            if not new_kit:
+                from frontend.widgets.pop_ups import ObjectSelector
+                dlg = ObjectSelector(
+                    title="Select Kit",
+                    message="Could not find reagents for this submission type/kit type combo.\nSelect new kit.",
+                    obj_type=self.__class__,
+                    values=[kit.name for kit in submission_type.kit_types]
+                )
+                if dlg.exec():
+                    dlg_result = dlg.parse_form()
+                    logger.debug(f"Dialog result: {dlg_result}")
+                    new_kit = self.__class__.query(name=dlg_result)
+                    logger.debug(f"Query result: {new_kit}")
+                    # return new_kit.construct_xl_map_for_use(submission_type=submission_type)
+                else:
+                    return None, new_kit
+            assocs = [item for item in new_kit.kit_reagentrole_associations if item.submission_type == submission_type]
+        # for assoc in assocs:
+        #     try:
+        #         yield assoc.reagent_role.name, assoc.uses
+        #     except TypeError:
+        #         continue
+        output = {assoc.reagent_role.name: assoc.uses for assoc in assocs}
+        logger.debug(f"Output: {output}")
+        return output, new_kit

     @classmethod
     @setup_lookup
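`construct_xl_map_for_use` now returns a `(mapping, kit)` tuple instead of yielding `(name, uses)` pairs, and the kit it hands back may differ from the one it was called on when the rescue path picks the submission type's default kit. A hypothetical caller therefore unpacks both values and handles the cancelled-dialog case, along the lines of the later `to_dict` hunk in this same commit:

```python
def reagent_map_for(kit, submission_type):
    """Sketch only: unpack the new (mapping, kit) contract defensively."""
    xl_map, kit_used = kit.construct_xl_map_for_use(submission_type)
    if xl_map is None:
        # The user cancelled the kit-selection dialog; there is nothing to fill in.
        return {}, kit_used
    return xl_map, kit_used
```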
@@ -444,7 +476,7 @@ class Reagent(BaseClass, LogMixin):
     Concrete reagent instance
     """

-    searchables = ["lot"]
+    searchables = [dict(label="Lot", field="lot")]

     id = Column(INTEGER, primary_key=True) #: primary key
     role = relationship("ReagentRole", back_populates="instances",
@@ -548,7 +580,9 @@ class Reagent(BaseClass, LogMixin):
     def query_or_create(cls, **kwargs) -> Reagent:
         from backend.validators.pydant import PydReagent
         new = False
-        instance = cls.query(**kwargs)
+        disallowed = ['expiry']
+        sanitized_kwargs = {k:v for k,v in kwargs.items() if k not in disallowed}
+        instance = cls.query(**sanitized_kwargs)
         if not instance or isinstance(instance, list):
             if "role" not in kwargs:
                 try:
@@ -557,7 +591,7 @@ class Reagent(BaseClass, LogMixin):
                     pass
             instance = PydReagent(**kwargs)
             new = True
-            instance, _ = instance.toSQL()
+            instance = instance.to_sql()
         logger.info(f"Instance from query or create: {instance}")
         return instance, new
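The `query_or_create` change strips keys that should not participate in the lookup (here `expiry`) before delegating to `query`, then falls back to building a fresh object. The generic shape of that idiom, detached from this repo's models, is just:

```python
def query_or_create(query_fn, create_fn, disallowed=("expiry",), **kwargs):
    """Sketch: look an object up while ignoring volatile fields, create it when absent."""
    lookup_kwargs = {k: v for k, v in kwargs.items() if k not in disallowed}
    instance = query_fn(**lookup_kwargs)
    created = False
    if instance is None or isinstance(instance, list):
        # No single match: build a new object from the full keyword set.
        instance = create_fn(**kwargs)
        created = True
    return instance, created
```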
@@ -644,38 +678,15 @@ class Reagent(BaseClass, LogMixin):
             except AttributeError as e:
                 logger.error(f"Could not set {key} due to {e}")

     @check_authorization
     def edit_from_search(self, obj, **kwargs):
         from frontend.widgets.omni_add_edit import AddEdit
-        role = ReagentRole.query(kwargs['role'])
-        if role:
-            role_name = role.name
-        else:
-            role_name = None
-        # dlg = AddReagentForm(reagent_lot=self.lot, reagent_role=role_name, expiry=self.expiry, reagent_name=self.name)
         dlg = AddEdit(parent=None, instance=self)
         if dlg.exec():
             pyd = dlg.parse_form()
             for field in pyd.model_fields:
                 self.set_attribute(field, pyd.__getattribute__(field))
-            # for key, value in vars.items():
-            #     match key:
-            #         case "expiry":
-            #             if isinstance(value, str):
-            #                 field_value = datetime.strptime(value, "%Y-%m-%d")
-            #             elif isinstance(value, date):
-            #                 field_value = datetime.combine(value, datetime.max.time())
-            #             else:
-            #                 field_value = value
-            #             field_value.replace(tzinfo=timezone)
-            #         case "role":
-            #             continue
-            #         case _:
-            #             field_value = value
-            #     self.__setattr__(key, field_value)
             self.save()
-            # print(self.__dict__)

     @classproperty
     def add_edit_tooltips(self):
@@ -801,8 +812,8 @@ class SubmissionType(BaseClass):
         """
         return f"<SubmissionType({self.name})>"

-    @classmethod
-    def retrieve_template_file(cls) -> bytes:
+    @classproperty
+    def basic_template(cls) -> bytes:
         """
         Grabs the default excel template file.
@@ -812,7 +823,8 @@ class SubmissionType(BaseClass):
         submission_type = cls.query(name="Bacterial Culture")
         return submission_type.template_file

-    def get_template_file_sheets(self) -> List[str]:
+    @property
+    def template_file_sheets(self) -> List[str]:
         """
         Gets names of sheet in the stored blank form.
@@ -870,15 +882,6 @@ class SubmissionType(BaseClass):
             output['custom'] = self.info_map['custom']
         return output

-    def construct_sample_map(self) -> dict:
-        """
-        Returns sample map
-
-        Returns:
-            dict: sample location map
-        """
-        return self.sample_map
-
     def construct_field_map(self, field: Literal['equipment', 'tip']) -> Generator[(str, dict), None, None]:
         """
         Make a map of all locations for tips or equipment.
@@ -895,7 +898,8 @@ class SubmissionType(BaseClass):
                 fmap = {}
             yield getattr(item, f"{field}_role").name, fmap

-    def get_default_kit(self) -> KitType | None:
+    @property
+    def default_kit(self) -> KitType | None:
         """
         If only one kits exists for this Submission Type, return it.
@@ -941,7 +945,8 @@ class SubmissionType(BaseClass):
                 raise TypeError(f"Type {type(equipment_role)} is not allowed")
         return list(set([item for items in relevant for item in items if item is not None]))

-    def get_submission_class(self) -> "BasicSubmission":
+    @property
+    def submission_class(self) -> "BasicSubmission":
         """
         Gets submission class associated with this submission type.
@@ -993,7 +998,8 @@ class SubmissionType(BaseClass):
         base_dict = dict(name=self.name)
         base_dict['info'] = self.construct_info_map(mode='export')
         base_dict['defaults'] = self.defaults
-        base_dict['samples'] = self.construct_sample_map()
+        # base_dict['samples'] = self.construct_sample_map()
+        base_dict['samples'] = self.sample_map
         base_dict['kits'] = [item.to_export_dict() for item in self.submissiontype_kit_associations]
         return base_dict
@@ -1413,7 +1419,8 @@ class Equipment(BaseClass, LogMixin):
         return {k: v for k, v in self.__dict__.items()}

     def get_processes(self, submission_type: str | SubmissionType | None = None,
-                      extraction_kit: str | KitType | None = None) -> List[str]:
+                      extraction_kit: str | KitType | None = None,
+                      equipment_role: str | EquipmentRole | None=None) -> List[str]:
         """
         Get all processes associated with this Equipment for a given SubmissionType
@@ -1433,6 +1440,8 @@ class Equipment(BaseClass, LogMixin):
                 continue
             if extraction_kit and extraction_kit not in process.kit_types:
                 continue
+            if equipment_role and equipment_role not in process.equipment_roles:
+                continue
             yield process

     @classmethod
@@ -1489,12 +1498,12 @@ class Equipment(BaseClass, LogMixin):
             PydEquipment: pydantic equipment object
         """
         from backend.validators.pydant import PydEquipment
-        processes = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit)
+        processes = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit, equipment_role=role)
         return PydEquipment(processes=processes, role=role,
                             **self.to_dict(processes=False))

-    @classmethod
-    def get_regex(cls) -> re.Pattern:
+    @classproperty
+    def manufacturer_regex(cls) -> re.Pattern:
         """
         Creates regex to determine tip manufacturer
@@ -1809,6 +1818,9 @@ class Process(BaseClass):
     def query(cls,
               name: str | None = None,
               id: int | None = None,
+              submission_type: str | SubmissionType | None = None,
+              extraction_kit : str | KitType | None = None,
+              equipment_role: str | KitType | None = None,
               limit: int = 0) -> Process | List[Process]:
         """
         Lookup Processes
@@ -1822,6 +1834,30 @@ class Process(BaseClass):
             Process|List[Process]: Process(es) matching criteria
         """
         query = cls.__database_session__.query(cls)
+        match submission_type:
+            case str():
+                submission_type = SubmissionType.query(name=submission_type)
+                query = query.filter(cls.submission_types.contains(submission_type))
+            case SubmissionType():
+                query = query.filter(cls.submission_types.contains(submission_type))
+            case _:
+                pass
+        match extraction_kit:
+            case str():
+                extraction_kit = KitType.query(name=extraction_kit)
+                query = query.filter(cls.kit_types.contains(extraction_kit))
+            case KitType():
+                query = query.filter(cls.kit_types.contains(extraction_kit))
+            case _:
+                pass
+        match equipment_role:
+            case str():
+                equipment_role = EquipmentRole.query(name=equipment_role)
+                query = query.filter(cls.equipment_roles.contains(equipment_role))
+            case EquipmentRole():
+                query = query.filter(cls.equipment_roles.contains(equipment_role))
+            case _:
+                pass
         match name:
             case str():
                 query = query.filter(cls.name == name)
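Each of the new filters in `Process.query` follows the same shape: accept either a name string or an ORM instance, resolve strings via the related class's `query`, then apply a relationship `contains` filter. Detached from these models, the idiom is roughly:

```python
def filter_by_name_or_instance(query, value, lookup_cls, relation):
    """Sketch of the str-or-instance filter pattern used above.

    lookup_cls and relation are placeholders, e.g. SubmissionType and Process.submission_types.
    """
    if isinstance(value, str):
        # Resolve a human-readable name to the ORM object before filtering.
        value = lookup_cls.query(name=value)
    if value is not None:
        query = query.filter(relation.contains(value))
    return query
```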
@@ -1975,6 +2011,14 @@ class SubmissionTipsAssociation(BaseClass):
             query = query.filter(cls.role_name == role)
         return cls.execute_query(query=query, limit=limit, **kwargs)

+    @classmethod
+    def query_or_create(cls, tips, submission, role: str, **kwargs):
+        instance = cls.query(tip_id=tips.id, role=role, submission_id=submission.id, limit=1, **kwargs)
+        if instance is None:
+            instance = SubmissionTipsAssociation(submission=submission, tips=tips, role_name=role)
+        return instance
+
     def to_pydantic(self):
         from backend.validators import PydTips
         return PydTips(name=self.tips.name, lot=self.tips.lot, role=self.role_name)

View File

@@ -2,8 +2,6 @@
 Models for the main submission and sample types.
 """
 from __future__ import annotations
-from collections import OrderedDict
 from copy import deepcopy
 from getpass import getuser
 import logging, uuid, tempfile, re, base64, numpy as np, pandas as pd, types, sys
@@ -12,7 +10,7 @@ from zipfile import ZipFile, BadZipfile
 from tempfile import TemporaryDirectory, TemporaryFile
 from operator import itemgetter
 from pprint import pformat
-from . import BaseClass, Reagent, SubmissionType, KitType, Organization, Contact, LogMixin
+from . import BaseClass, Reagent, SubmissionType, KitType, Organization, Contact, LogMixin, SubmissionReagentAssociation
 from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, JSON, FLOAT, case, func
 from sqlalchemy.orm import relationship, validates, Query
 from sqlalchemy.orm.attributes import flag_modified
@@ -25,13 +23,15 @@ from openpyxl.drawing.image import Image as OpenpyxlImage
 from tools import row_map, setup_lookup, jinja_template_loading, rreplace, row_keys, check_key_or_attr, Result, Report, \
     report_result, create_holidays_for_year
 from datetime import datetime, date, timedelta
-from typing import List, Any, Tuple, Literal, Generator
+from typing import List, Any, Tuple, Literal, Generator, Type
 from dateutil.parser import parse
 from pathlib import Path
 from jinja2.exceptions import TemplateNotFound
 from jinja2 import Template
 from PIL import Image

 logger = logging.getLogger(f"submissions.{__name__}")
@@ -126,7 +126,7 @@ class BasicSubmission(BaseClass, LogMixin):
     def __repr__(self) -> str:
         return f"<Submission({self.rsl_plate_num})>"

-    @classmethod
+    @classproperty
     def jsons(cls) -> List[str]:
         """
         Get list of JSON db columns
@@ -136,10 +136,10 @@ class BasicSubmission(BaseClass, LogMixin):
         """
         output = [item.name for item in cls.__table__.columns if isinstance(item.type, JSON)]
         if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
-            output += BasicSubmission.jsons()
+            output += BasicSubmission.jsons
         return output

-    @classmethod
+    @classproperty
     def timestamps(cls) -> List[str]:
         """
         Get list of TIMESTAMP columns
@@ -149,7 +149,7 @@ class BasicSubmission(BaseClass, LogMixin):
         """
         output = [item.name for item in cls.__table__.columns if isinstance(item.type, TIMESTAMP)]
         if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
-            output += BasicSubmission.timestamps()
+            output += BasicSubmission.timestamps
         return output

     @classmethod
@@ -259,7 +259,8 @@ class BasicSubmission(BaseClass, LogMixin):
         Returns:
             dict: sample location map
         """
-        return cls.get_submission_type(submission_type).construct_sample_map()
+        # return cls.get_submission_type(submission_type).construct_sample_map()
+        return cls.get_submission_type(submission_type).sample_map

     def generate_associations(self, name: str, extra: str | None = None):
         try:
@@ -277,6 +278,7 @@ class BasicSubmission(BaseClass, LogMixin):
         Constructs dictionary used in submissions summary

         Args:
+            report (bool, optional): indicates if to be used for a report. Defaults to False.
             full_data (bool, optional): indicates if sample dicts to be constructed. Defaults to False.
             backup (bool, optional): passed to adjust_to_dict_samples. Defaults to False.
@@ -323,7 +325,8 @@ class BasicSubmission(BaseClass, LogMixin):
             logger.error(f"We got an error retrieving reagents: {e}")
             reagents = []
         finally:
-            for k, v in self.extraction_kit.construct_xl_map_for_use(self.submission_type):
+            dicto, _ = self.extraction_kit.construct_xl_map_for_use(self.submission_type)
+            for k, v in dicto.items():
                 if k == 'info':
                     continue
                 if not any([item['role'] == k for item in reagents]):
@@ -381,7 +384,8 @@ class BasicSubmission(BaseClass, LogMixin):
         output["completed_date"] = self.completed_date
         return output

-    def calculate_column_count(self) -> int:
+    @property
+    def column_count(self) -> int:
         """
         Calculate the number of columns in this submission
@@ -391,13 +395,14 @@ class BasicSubmission(BaseClass, LogMixin):
         columns = set([assoc.column for assoc in self.submission_sample_associations])
         return len(columns)

-    def calculate_base_cost(self):
+    def calculate_base_cost(self) -> None:
         """
         Calculates cost of the plate
         """
         # NOTE: Calculate number of columns based on largest column number
         try:
-            cols_count_96 = self.calculate_column_count()
+            cols_count_96 = self.column_count
         except Exception as e:
             logger.error(f"Column count error: {e}")
         # NOTE: Get kit associated with this submission
@@ -418,14 +423,15 @@ class BasicSubmission(BaseClass, LogMixin):
             logger.error(f"Calculation error: {e}")
         self.run_cost = round(self.run_cost, 2)

-    def hitpick_plate(self) -> list:
+    @property
+    def hitpicked(self) -> list:
         """
         Returns positve sample locations for plate

         Returns:
             list: list of hitpick dictionaries for each sample
         """
-        output_list = [assoc.to_hitpick() for assoc in self.submission_sample_associations]
+        output_list = [assoc.hitpicked for assoc in self.submission_sample_associations]
         return output_list

     @classmethod
@@ -454,7 +460,8 @@ class BasicSubmission(BaseClass, LogMixin):
         html = template.render(samples=output_samples, PLATE_ROWS=plate_rows, PLATE_COLUMNS=plate_columns)
         return html + "<br/>"

-    def get_used_equipment(self) -> List[str]:
+    @property
+    def used_equipment(self) -> Generator[str, None, None]:
         """
         Gets EquipmentRole names associated with this BasicSubmission
@@ -490,6 +497,7 @@ class BasicSubmission(BaseClass, LogMixin):
                    'source_plates', 'pcr_technician', 'ext_technician', 'artic_technician', 'cost_centre',
                    'signed_by', 'artic_date', 'gel_barcode', 'gel_date', 'ngs_date', 'contact_phone', 'contact',
                    'tips', 'gel_image_path', 'custom']
+        # NOTE: dataframe equals dataframe of all columns not in exclude
         df = df.loc[:, ~df.columns.isin(exclude)]
         if chronologic:
             try:
@@ -531,7 +539,7 @@ class BasicSubmission(BaseClass, LogMixin):
                 field_value = value
             case "ctx" | "csv" | "filepath" | "equipment" | "controls":
                 return
-            case item if item in self.jsons():
+            case item if item in self.jsons:
                 match key:
                     case "custom" | "source_plates":
                         existing = value
@@ -549,7 +557,7 @@ class BasicSubmission(BaseClass, LogMixin):
                         if isinstance(value, list):
                             existing += value
                         else:
-                            if value is not None:
+                            if value:
                                 existing.append(value)
                         self.__setattr__(key, existing)
                 # NOTE: Make sure this gets updated by telling SQLAlchemy it's been modified.
@@ -636,12 +644,6 @@ class BasicSubmission(BaseClass, LogMixin):
                 field_value = [item.to_pydantic() for item in self.submission_tips_associations]
             case "submission_type":
                 field_value = dict(value=self.__getattribute__(key).name, missing=missing)
-            # case "contact":
-            #     try:
-            #         field_value = dict(value=self.__getattribute__(key).name, missing=missing)
-            #     except AttributeError:
-            #         contact = self.submitting_lab.contacts[0]
-            #         field_value = dict(value=contact.name, missing=True)
             case "plate_number":
                 key = 'rsl_plate_num'
                 field_value = dict(value=self.rsl_plate_num, missing=missing)
@@ -677,7 +679,7 @@ class BasicSubmission(BaseClass, LogMixin):
         return super().save()

     @classmethod
-    def get_regex(cls, submission_type: SubmissionType | str | None = None) -> str:
+    def get_regex(cls, submission_type: SubmissionType | str | None = None) -> re.Pattern:
         """
         Gets the regex string for identifying a certain class of submission.
@@ -685,18 +687,26 @@ class BasicSubmission(BaseClass, LogMixin):
             submission_type (SubmissionType | str | None, optional): submission type of interest. Defaults to None.

         Returns:
-            str: _description_
+            str: String from which regex will be compiled.
         """
-        # logger.debug(f"Class for regex: {cls}")
         try:
-            return cls.get_submission_type(submission_type).defaults['regex']
+            regex = cls.get_submission_type(submission_type).defaults['regex']
         except AttributeError as e:
             logger.error(f"Couldn't get submission type for {cls.__mapper_args__['polymorphic_identity']}")
-            return ""
+            regex = None
+        try:
+            regex = re.compile(rf"{regex}", flags=re.IGNORECASE | re.VERBOSE)
+        except re.error as e:
+            regex = cls.construct_regex()
+        # logger.debug(f"Returning regex: {regex}")
+        return regex

     # NOTE: Polymorphic functions
-    @classmethod
-    def construct_regex(cls) -> re.Pattern:
+    @classproperty
+    def regex(cls) -> re.Pattern:
         """
         Constructs catchall regex.
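The reworked `get_regex` fetches the stored per-type pattern, compiles it with `re.IGNORECASE | re.VERBOSE`, and falls back to the class's catch-all regex if compilation fails. The defensive-compile idiom on its own looks like the sketch below; the fallback pattern here is a made-up stand-in for what `construct_regex` would produce.

```python
import re

def compile_or_fallback(pattern: str | None, fallback: str = r"RSL-\d+") -> re.Pattern:
    """Sketch: compile a stored pattern defensively, falling back to a known-good catch-all."""
    if pattern is None:
        return re.compile(fallback, flags=re.IGNORECASE)
    try:
        return re.compile(pattern, flags=re.IGNORECASE | re.VERBOSE)
    except re.error:
        # Malformed patterns coming out of config land here.
        return re.compile(fallback, flags=re.IGNORECASE)
```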
@@ -762,7 +772,9 @@ class BasicSubmission(BaseClass, LogMixin):
         """
         input_dict['custom'] = {}
         for k, v in custom_fields.items():
+            logger.debug(f"Custom info parser getting type: {v['type']}")
             match v['type']:
+                # NOTE: 'exempt' type not currently used
                 case "exempt":
                     continue
                 case "cell":
@@ -796,7 +808,7 @@ class BasicSubmission(BaseClass, LogMixin):
     @classmethod
     def custom_validation(cls, pyd: "PydSubmission") -> "PydSubmission":
         """
-        Performs any final custom parsing of the excel file.
+        Performs any final parsing of the pydantic object that only needs to be done for this cls.

         Args:
             input_dict (dict): Parser product up to this point.
@@ -849,6 +861,14 @@ class BasicSubmission(BaseClass, LogMixin):
     @classmethod
     def custom_sample_writer(self, sample: dict) -> dict:
+        """
+        Performs any final alterations to sample writing unique to this submission type.
+
+        Args:
+            sample (dict): Dictionary of sample values.
+
+        Returns:
+            dict: Finalized dictionary.
+        """
         return sample

     @classmethod
@@ -884,7 +904,7 @@ class BasicSubmission(BaseClass, LogMixin):
             logger.error(f"Error making outstr: {e}, sending to RSLNamer to make new plate name.")
             outstr = RSLNamer.construct_new_plate_name(data=data)
         try:
-            # NOTE: Grab plate number
+            # NOTE: Grab plate number as number after a -|_ not followed by another number
             plate_number = re.search(r"(?:(-|_)\d)(?!\d)", outstr).group().strip("_").strip("-")
         except AttributeError as e:
             plate_number = "1"
@@ -910,7 +930,7 @@ class BasicSubmission(BaseClass, LogMixin):
         Args:
             xl (pd.DataFrame): pcr info form
-            rsl_plate_number (str): rsl plate num of interest
+            rsl_plate_num (str): rsl plate num of interest

         Returns:
             Generator[dict, None, None]: Updated samples
@@ -943,16 +963,16 @@ class BasicSubmission(BaseClass, LogMixin):
         submission = cls.query(rsl_plate_num=rsl_plate_num)
         name_column = 1
         for item in location_map:
-            logger.debug(f"Checking {item}")
+            # logger.debug(f"Checking {item}")
             worksheet = xl[item['sheet']]
             for iii, row in enumerate(worksheet.iter_rows(max_row=len(worksheet['A']), max_col=name_column), start=1):
-                logger.debug(f"Checking row {row}, {iii}")
+                # logger.debug(f"Checking row {row}, {iii}")
                 for cell in row:
-                    logger.debug(f"Checking cell: {cell}, with value {cell.value} against {item['name']}")
+                    # logger.debug(f"Checking cell: {cell}, with value {cell.value} against {item['name']}")
                     if cell.value == item['name']:
                         subtype, _ = item['name'].split("-")
                         target = item['target']
-                        logger.debug(f"Subtype: {subtype}, target: {target}")
+                        # logger.debug(f"Subtype: {subtype}, target: {target}")
                         ct = worksheet.cell(row=iii, column=item['ct_column']).value
                         # NOTE: Kind of a stop gap solution to find control reagents.
                         if subtype == "PC":
@@ -966,7 +986,7 @@ class BasicSubmission(BaseClass, LogMixin):
                                                  assoc.reagent.role])), None)
                         else:
                             ctrl = None
-                        logger.debug(f"Control reagent: {ctrl.__dict__}")
+                        # logger.debug(f"Control reagent: {ctrl.__dict__}")
                         try:
                             ct = float(ct)
                         except ValueError:
@@ -982,7 +1002,7 @@ class BasicSubmission(BaseClass, LogMixin):
                             target=target,
                             reagent_lot=ctrl
                         )
-                        logger.debug(f"Control output: {pformat(output)}")
+                        # logger.debug(f"Control output: {pformat(output)}")
                         yield output

     @classmethod
@@ -1010,7 +1030,7 @@ class BasicSubmission(BaseClass, LogMixin):
         return samples

     @classmethod
-    def get_details_template(cls, base_dict: dict) -> Template:
+    def get_details_template(cls, base_dict: dict) -> Tuple[dict, Template]:
         """
         Get the details jinja template for the correct class
@@ -1040,8 +1060,8 @@ class BasicSubmission(BaseClass, LogMixin):
               submission_type_name: str | None = None,
               id: int | str | None = None,
               rsl_plate_num: str | None = None,
-              start_date: date | str | int | None = None,
-              end_date: date | str | int | None = None,
+              start_date: date | datetime | str | int | None = None,
+              end_date: date | datetime | str | int | None = None,
              reagent: Reagent | str | None = None,
              chronologic: bool = False,
              limit: int = 0,
@@ -1065,6 +1085,7 @@ class BasicSubmission(BaseClass, LogMixin):
         Returns:
             models.BasicSubmission | List[models.BasicSubmission]: Submission(s) of interest
         """
+        from ... import SubmissionReagentAssociation
         # NOTE: if you go back to using 'model' change the appropriate cls to model in the query filters
         if submission_type is not None:
             model = cls.find_polymorphic_subclass(polymorphic_identity=submission_type)
@@ -1078,41 +1099,48 @@ class BasicSubmission(BaseClass, LogMixin):
             logger.warning(f"Start date with no end date, using today.")
             end_date = date.today()
         if end_date is not None and start_date is None:
-            logger.warning(f"End date with no start date, using Jan 1, 2023")
+            # NOTE: this query returns a tuple of (object, datetime), need to get only datetime.
             start_date = cls.__database_session__.query(cls, func.min(cls.submitted_date)).first()[1]
+            logger.warning(f"End date with no start date, using first submission date: {start_date}")
         if start_date is not None:
             match start_date:
-                case date() | datetime():
-                    start_date = start_date.strftime("%Y-%m-%d")
+                case date():
+                    pass
+                case datetime():
+                    start_date = start_date.date()
                 case int():
                     start_date = datetime.fromordinal(
-                        datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d")
+                        datetime(1900, 1, 1).toordinal() + start_date - 2).date()
                 case _:
-                    start_date = parse(start_date).strftime("%Y-%m-%d")
+                    start_date = parse(start_date).date()
+                    # start_date = start_date.strftime("%Y-%m-%d")
             match end_date:
-                case date() | datetime():
-                    end_date = end_date + timedelta(days=1)
-                    end_date = end_date.strftime("%Y-%m-%d")
+                case date():
+                    pass
+                case datetime():
+                    end_date = end_date# + timedelta(days=1)
+                    # pass
                 case int():
-                    end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date() \
-                               + timedelta(days=1)
-                    end_date = end_date.strftime("%Y-%m-%d")
+                    end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date()# \
+                    # + timedelta(days=1)
                 case _:
-                    end_date = parse(end_date) + timedelta(days=1)
-                    end_date = end_date.strftime("%Y-%m-%d")
+                    end_date = parse(end_date).date()# + timedelta(days=1)
+                    # end_date = end_date.strftime("%Y-%m-%d")
-            if start_date == end_date:
-                start_date = datetime.strptime(start_date, "%Y-%m-%d").strftime("%Y-%m-%d %H:%M:%S.%f")
-                query = query.filter(model.submitted_date == start_date)
-            else:
+            start_date = datetime.combine(start_date, datetime.min.time()).strftime("%Y-%m-%d %H:%M:%S.%f")
+            end_date = datetime.combine(end_date, datetime.max.time()).strftime("%Y-%m-%d %H:%M:%S.%f")
+            # if start_date == end_date:
+            #     start_date = start_date.strftime("%Y-%m-%d %H:%M:%S.%f")
+            #     query = query.filter(model.submitted_date == start_date)
+            # else:
             query = query.filter(model.submitted_date.between(start_date, end_date))
         # NOTE: by reagent (for some reason)
         match reagent:
             case str():
-                query = query.join(model.submission_reagent_associations).filter(
-                    SubmissionSampleAssociation.reagent.lot == reagent)
+                query = query.join(SubmissionReagentAssociation).join(Reagent).filter(
+                    Reagent.lot == reagent)
             case Reagent():
-                query = query.join(model.submission_reagent_associations).join(
-                    SubmissionSampleAssociation.reagent).filter(Reagent.lot == reagent)
+                query = query.join(SubmissionReagentAssociation).filter(
+                    SubmissionReagentAssociation.reagent == reagent)
             case _:
                 pass
         # NOTE: by rsl number (returns only a single value)
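The date handling above normalizes whatever it is given (date, datetime, Excel-style ordinal int, or string) to a plain date, then widens the window to whole days with `datetime.min.time()` / `datetime.max.time()` before the `between()` filter. The normalization on its own, detached from the ORM, is roughly:

```python
from datetime import date, datetime
from dateutil.parser import parse

def day_window(start, end):
    """Sketch: coerce assorted inputs into a full-day [start, end] window of datetimes."""
    def to_date(value):
        if isinstance(value, datetime):
            return value.date()
        if isinstance(value, date):
            return value
        if isinstance(value, int):
            # Excel-style ordinal, mirroring the diff's fromordinal arithmetic.
            return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value - 2).date()
        return parse(value).date()

    start, end = to_date(start), to_date(end)
    return (datetime.combine(start, datetime.min.time()),
            datetime.combine(end, datetime.max.time()))
```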
@@ -1217,6 +1245,7 @@ class BasicSubmission(BaseClass, LogMixin):
         msg = QuestionAsker(title="Delete?", message=f"Are you sure you want to delete {self.rsl_plate_num}?\n")
         if msg.exec():
             try:
+                # NOTE: backs up file as xlsx, same as export.
                 self.backup(fname=fname, full_backup=True)
             except BadZipfile:
                 logger.error("Couldn't open zipfile for writing.")
@@ -1285,16 +1314,16 @@ class BasicSubmission(BaseClass, LogMixin):
         if dlg.exec():
             equipment = dlg.parse_form()
             for equip in equipment:
-                _, assoc = equip.toSQL(submission=self)
+                _, assoc = equip.to_sql(submission=self)
                 try:
                     assoc.save()
                 except AttributeError as e:
                     logger.error(f"Couldn't save association with {equip} due to {e}")
                 if equip.tips:
                     for tips in equip.tips:
-                        logger.debug(f"Attempting to add tips assoc: {tips} (pydantic)")
+                        # logger.debug(f"Attempting to add tips assoc: {tips} (pydantic)")
                         tassoc = tips.to_sql(submission=self)
-                        logger.debug(f"Attempting to add tips assoc: {tips.__dict__} (sql)")
+                        # logger.debug(f"Attempting to add tips assoc: {tips.__dict__} (sql)")
                         if tassoc not in self.submission_tips_associations:
                             tassoc.save()
                         else:
@@ -1320,7 +1349,8 @@ class BasicSubmission(BaseClass, LogMixin):
         writer = pyd.to_writer()
         writer.xl.save(filename=fname.with_suffix(".xlsx"))

-    def get_turnaround_time(self) -> Tuple[int | None, bool | None]:
+    @property
+    def turnaround_time(self) -> int:
         try:
             completed = self.completed_date.date()
         except AttributeError:
@@ -1328,25 +1358,24 @@ class BasicSubmission(BaseClass, LogMixin):
         return self.calculate_turnaround(start_date=self.submitted_date.date(), end_date=completed)

     @classmethod
-    def calculate_turnaround(cls, start_date: date | None = None, end_date: date | None = None) -> Tuple[
-        int | None, bool | None]:
-        if 'pytest' not in sys.modules:
-            from tools import ctx
-        else:
-            from test_settings import ctx
+    def calculate_turnaround(cls, start_date: date | None = None, end_date: date | None = None) -> int:
+        """
+        Calculates number of business days between data submitted and date completed
+
+        Args:
+            start_date (date, optional): Date submitted. defaults to None.
+            end_date (date, optional): Date completed. defaults to None.
+
+        Returns:
+            int: Number of business days.
+        """
         if not end_date:
-            return None, None
+            return None
         try:
             delta = np.busday_count(start_date, end_date, holidays=create_holidays_for_year(start_date.year)) + 1
         except ValueError:
-            return None, None
-        try:
-            tat = cls.get_default_info("turnaround_time")
-        except (AttributeError, KeyError):
-            tat = None
-        if not tat:
-            tat = ctx.TaT_threshold
-        return delta, delta <= tat
+            return None
+        return delta

     # NOTE: Below are the custom submission types
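`calculate_turnaround` now returns just the business-day count and leaves any threshold comparison to its callers. The NumPy call it leans on behaves like the snippet below; the holiday list is a made-up stand-in for `create_holidays_for_year`.

```python
import numpy as np
from datetime import date

start, end = date(2025, 1, 2), date(2025, 1, 10)
holidays = [date(2025, 1, 6)]  # assumption: stand-in for create_holidays_for_year()

# busday_count counts business days from start up to but not including end;
# the trailing +1 mirrors the diff's inclusive convention.
tat = np.busday_count(start, end, holidays=holidays) + 1
print(tat)
```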
@@ -1385,7 +1414,7 @@ class BacterialCulture(BasicSubmission):
         return template

     @classmethod
-    def custom_validation(cls, pyd) -> dict:
+    def custom_validation(cls, pyd) -> "PydSubmission":
         """
         Extends parent. Currently finds control sample and adds to reagents.
@@ -1395,7 +1424,7 @@ class BacterialCulture(BasicSubmission):
             info_map (dict | None, optional): _description_. Defaults to None.

         Returns:
-            dict: Updated dictionary.
+            PydSubmission: Updated pydantic.
         """
         from . import ControlType
         pyd = super().custom_validation(pyd)
@@ -1549,9 +1578,10 @@ class Wastewater(BasicSubmission):
         """
         samples = [item for item in super().parse_pcr(xl=xl, rsl_plate_num=rsl_plate_num)]
         # NOTE: Due to having to run through samples in for loop we need to convert to list.
+        # NOTE: Also, you can't change the size of a list while iterating it, so don't even think about it.
         output = []
         for sample in samples:
-            logger.debug(sample)
+            # logger.debug(sample)
             # NOTE: remove '-{target}' from controls
             sample['sample'] = re.sub('-N\\d*$', '', sample['sample'])
             # NOTE: if sample is already in output skip
@@ -1559,7 +1589,7 @@ class Wastewater(BasicSubmission):
                 logger.warning(f"Already have {sample['sample']}")
                 continue
             # NOTE: Set ct values
-            logger.debug(f"Sample ct: {sample['ct']}")
+            # logger.debug(f"Sample ct: {sample['ct']}")
             sample[f"ct_{sample['target'].lower()}"] = sample['ct'] if isinstance(sample['ct'], float) else 0.0
             # NOTE: Set assessment
             logger.debug(f"Sample assessemnt: {sample['assessment']}")
@@ -1578,7 +1608,7 @@ class Wastewater(BasicSubmission):
             except KeyError:
                 pass
             output.append(sample)
-        # NOTE: And then convert back to list ot keep fidelity with parent method.
+        # NOTE: And then convert back to list to keep fidelity with parent method.
         for sample in output:
             yield sample
@@ -1644,7 +1674,7 @@ class Wastewater(BasicSubmission):
         return events

     @report_result
-    def link_pcr(self, obj):
+    def link_pcr(self, obj) -> Report:
         """
         PYQT6 function to add PCR info to this submission
@@ -1660,7 +1690,8 @@ class Wastewater(BasicSubmission):
             report.add_result(Result(msg="No file selected, cancelling.", status="Warning"))
             return report
         parser = PCRParser(filepath=fname, submission=self)
-        self.set_attribute("pcr_info", parser.pcr)
+        self.set_attribute("pcr_info", parser.pcr_info)
+        # NOTE: These are generators here, need to expand.
         pcr_samples = [sample for sample in parser.samples]
         pcr_controls = [control for control in parser.controls]
         self.save(original=False)
@@ -1674,19 +1705,19 @@ class Wastewater(BasicSubmission):
             result = assoc.save()
             report.add_result(result)
         controltype = ControlType.query(name="PCR Control")
-        submitted_date = datetime.strptime(" ".join(parser.pcr['run_start_date/time'].split(" ")[:-1]),
+        submitted_date = datetime.strptime(" ".join(parser.pcr_info['run_start_date/time'].split(" ")[:-1]),
                                            "%Y-%m-%d %I:%M:%S %p")
         for control in pcr_controls:
-            logger.debug(f"Control coming into save: {control}")
+            # logger.debug(f"Control coming into save: {control}")
             new_control = PCRControl(**control)
             new_control.submitted_date = submitted_date
             new_control.controltype = controltype
             new_control.submission = self
-            logger.debug(f"Control coming into save: {new_control.__dict__}")
+            # logger.debug(f"Control coming into save: {new_control.__dict__}")
             new_control.save()
         return report

-    def update_subsampassoc(self, sample: BasicSample, input_dict: dict):
+    def update_subsampassoc(self, sample: BasicSample, input_dict: dict) -> SubmissionSampleAssociation:
         """
         Updates a joined submission sample association by assigning ct values to n1 or n2 based on alphabetical sorting.
@@ -1722,7 +1753,7 @@ class WastewaterArtic(BasicSubmission):
     artic_date = Column(TIMESTAMP) #: Date Artic Performed
     ngs_date = Column(TIMESTAMP) #: Date submission received
     gel_date = Column(TIMESTAMP) #: Date submission received
-    gel_barcode = Column(String(16))
+    gel_barcode = Column(String(16)) #: Identifier for the used gel.
     __mapper_args__ = dict(polymorphic_identity="Wastewater Artic",
                            polymorphic_load="inline",
@@ -1769,6 +1800,16 @@ class WastewaterArtic(BasicSubmission):
         from openpyxl_image_loader.sheet_image_loader import SheetImageLoader

         def scrape_image(wb: Workbook, info_dict: dict) -> Image or None:
+            """
+            Pulls image from excel workbook
+
+            Args:
+                wb (Workbook): Workbook of interest.
+                info_dict (dict): Location map.
+
+            Returns:
+                Image or None: Image of interest.
+            """
             ws = wb[info_dict['sheet']]
             img_loader = SheetImageLoader(ws)
             for ii in range(info_dict['start_row'], info_dict['end_row'] + 1):
@@ -1805,7 +1846,7 @@ class WastewaterArtic(BasicSubmission):
             if datum['plate'] in ["None", None, ""]:
                 continue
             else:
-                datum['plate'] = RSLNamer(filename=datum['plate'], sub_type="Wastewater").parsed_name
+                datum['plate'] = RSLNamer(filename=datum['plate'], submission_type="Wastewater").parsed_name
         if xl is not None:
             try:
                 input_dict['csv'] = xl["hitpicks_csv_to_export"]
@@ -1864,6 +1905,7 @@ class WastewaterArtic(BasicSubmission):
         Returns:
             str: Updated name.
         """
+        logger.debug(f"Incoming String: {instr}")
         try:
             # NOTE: Deal with PCR file.
             instr = re.sub(r"Artic", "", instr, flags=re.IGNORECASE)
@@ -1900,8 +1942,7 @@ class WastewaterArtic(BasicSubmission):
             input_dict['source_plate_number'] = int(input_dict['source_plate_number'])
         except (ValueError, KeyError):
             input_dict['source_plate_number'] = 0
-        # NOTE: Because generate_sample_object needs the submitter_id and the artic has the "({origin well})"
-        # at the end, this has to be done here. No moving to sqlalchemy object :(
+        # NOTE: Because generate_sample_object needs the submitter_id and the artic has the "({origin well})" at the end, this has to be done here. No moving to sqlalchemy object :(
         input_dict['submitter_id'] = re.sub(r"\s\(.+\)\s?$", "", str(input_dict['submitter_id'])).strip()
         try:
             input_dict['ww_processing_num'] = input_dict['sample_name_(lims)']
@@ -1988,7 +2029,11 @@ class WastewaterArtic(BasicSubmission):
         except AttributeError:
             plate_num = "1"
         plate_num = plate_num.strip("-")
-        repeat_num = re.search(r"R(?P<repeat>\d)?$", "PBS20240426-2R").groups()[0]
+        # repeat_num = re.search(r"R(?P<repeat>\d)?$", "PBS20240426-2R").groups()[0]
+        try:
+            repeat_num = re.search(r"R(?P<repeat>\d)?$", processed).groups()[0]
+        except:
+            repeat_num = None
         if repeat_num is None and "R" in plate_num:
             repeat_num = "1"
         plate_num = re.sub(r"R", rf"R{repeat_num}", plate_num)
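The fix above stops searching the hard-coded sample string "PBS20240426-2R" and looks at the actual processed name for a trailing repeat marker. The named-group regex behaves like this sketch; the plate names are examples only.

```python
import re

def repeat_suffix(name: str) -> str | None:
    """Sketch: pull a trailing R<digit> repeat marker off a plate name, if present."""
    found = re.search(r"R(?P<repeat>\d)?$", name)
    return found.group("repeat") if found else None

print(repeat_suffix("PBS20240426-2R1"))  # -> "1"
print(repeat_suffix("PBS20240426-2R"))   # -> None (R present but no digit)
print(repeat_suffix("PBS20240426-2"))    # -> None (no trailing R)
```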
@@ -2192,7 +2237,7 @@ class BasicSample(BaseClass, LogMixin):
     Base of basic sample which polymorphs into BCSample and WWSample
     """

-    searchables = ['submitter_id']
+    searchables = [dict(label="Submitter ID", field="submitter_id")]

     id = Column(INTEGER, primary_key=True) #: primary key
     submitter_id = Column(String(64), nullable=False, unique=True) #: identification from submitter
@@ -2242,7 +2287,7 @@ class BasicSample(BaseClass, LogMixin):
         except AttributeError:
             return f"<Sample({self.submitter_id})"

-    @classmethod
+    @classproperty
     def timestamps(cls) -> List[str]:
         """
         Constructs a list of all attributes stored as SQL Timestamps
@@ -2252,7 +2297,7 @@ class BasicSample(BaseClass, LogMixin):
         """
         output = [item.name for item in cls.__table__.columns if isinstance(item.type, TIMESTAMP)]
         if issubclass(cls, BasicSample) and not cls.__name__ == "BasicSample":
-            output += BasicSample.timestamps()
+            output += BasicSample.timestamps
         return output

     def to_sub_dict(self, full_data: bool = False) -> dict:
@@ -2293,7 +2338,7 @@ class BasicSample(BaseClass, LogMixin):
     @classmethod
     def find_polymorphic_subclass(cls, polymorphic_identity: str | None = None,
-                                  attrs: dict | None = None) -> BasicSample:
+                                  attrs: dict | None = None) -> Type[BasicSample]:
         """
         Retrieves subclasses of BasicSample based on type name.
@@ -2340,8 +2385,8 @@ class BasicSample(BaseClass, LogMixin):
         """
         return input_dict

-    @classmethod
-    def get_details_template(cls) -> Template:
+    @classproperty
+    def details_template(cls) -> Template:
         """
         Get the details jinja template for the correct class
@@ -2458,15 +2503,15 @@ class BasicSample(BaseClass, LogMixin):
def delete(self): def delete(self):
raise AttributeError(f"Delete not implemented for {self.__class__}") raise AttributeError(f"Delete not implemented for {self.__class__}")
@classmethod # @classmethod
def get_searchables(cls) -> List[dict]: # def get_searchables(cls) -> List[dict]:
""" # """
Delivers a list of fields that can be used in fuzzy search. # Delivers a list of fields that can be used in fuzzy search.
#
Returns: # Returns:
List[str]: List of fields. # List[str]: List of fields.
""" # """
return [dict(label="Submitter ID", field="submitter_id")] # return [dict(label="Submitter ID", field="submitter_id")]
@classmethod @classmethod
def samples_to_df(cls, sample_list: List[BasicSample], **kwargs) -> pd.DataFrame: def samples_to_df(cls, sample_list: List[BasicSample], **kwargs) -> pd.DataFrame:
@@ -2504,6 +2549,16 @@ class BasicSample(BaseClass, LogMixin):
pass pass
def edit_from_search(self, obj, **kwargs): def edit_from_search(self, obj, **kwargs):
"""
Function called from search. What "Edit" does depends on the class; this one just shows details.
Args:
obj (__type__): Parent widget.
**kwargs (): Required for all edit from search functions.
Returns:
"""
self.show_details(obj) self.show_details(obj)
@@ -2514,7 +2569,7 @@ class WastewaterSample(BasicSample):
Derivative wastewater sample Derivative wastewater sample
""" """
searchables = BasicSample.searchables + ['ww_processing_num', 'ww_full_sample_id', 'rsl_number'] # searchables = BasicSample.searchables + ['ww_processing_num', 'ww_full_sample_id', 'rsl_number']
id = Column(INTEGER, ForeignKey('_basicsample.id'), primary_key=True) id = Column(INTEGER, ForeignKey('_basicsample.id'), primary_key=True)
ww_processing_num = Column(String(64)) #: wastewater processing number ww_processing_num = Column(String(64)) #: wastewater processing number
@@ -2594,15 +2649,15 @@ class WastewaterSample(BasicSample):
# logger.debug(pformat(output_dict, indent=4)) # logger.debug(pformat(output_dict, indent=4))
return output_dict return output_dict
@classmethod @classproperty
def get_searchables(cls) -> List[str]: def searchables(cls) -> List[dict]:
""" """
Delivers a list of fields that can be used in fuzzy search. Extends parent. Delivers a list of fields that can be used in fuzzy search. Extends parent.
Returns: Returns:
List[str]: List of fields. List[dict]: List of label/field dicts.
""" """
searchables = super().get_searchables() searchables = super().searchables
for item in ["ww_processing_num", "ww_full_sample_id", "rsl_number"]: for item in ["ww_processing_num", "ww_full_sample_id", "rsl_number"]:
label = item.strip("ww_").replace("_", " ").replace("rsl", "RSL").title() label = item.strip("ww_").replace("_", " ").replace("rsl", "RSL").title()
searchables.append(dict(label=label, field=item)) searchables.append(dict(label=label, field=item))
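For reference, here is what that label pipeline produces for the three wastewater fields; this is just the deterministic behaviour of the str methods involved, not output captured from the running app:

fields = ["ww_processing_num", "ww_full_sample_id", "rsl_number"]
labels = [f.strip("ww_").replace("_", " ").replace("rsl", "RSL").title() for f in fields]
# strip("ww_") strips the characters 'w' and '_' from both ends (a character set, not a prefix),
# and .title() lower-cases letters after the first, so "RSL" comes out as "Rsl".
print(labels)  # ['Processing Num', 'Full Sample Id', 'Rsl Number']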
@@ -2726,7 +2781,8 @@ class SubmissionSampleAssociation(BaseClass):
from backend.validators import PydSample from backend.validators import PydSample
return PydSample(**self.to_sub_dict()) return PydSample(**self.to_sub_dict())
def to_hitpick(self) -> dict | None: @property
def hitpicked(self) -> dict | None:
""" """
Outputs a dictionary usable for html plate maps. Outputs a dictionary usable for html plate maps.
@@ -2948,14 +3004,15 @@ class WastewaterAssociation(SubmissionSampleAssociation):
logger.error(f"Couldn't check positives for {self.sample.rsl_number}. Looks like there isn't PCR data.") logger.error(f"Couldn't check positives for {self.sample.rsl_number}. Looks like there isn't PCR data.")
return sample return sample
def to_hitpick(self) -> dict | None: @property
def hitpicked(self) -> dict | None:
""" """
Outputs a dictionary usable for html plate maps. Extends parent Outputs a dictionary usable for html plate maps. Extends parent
Returns: Returns:
dict: dictionary of sample id, row and column in elution plate dict: dictionary of sample id, row and column in elution plate
""" """
sample = super().to_hitpick() sample = super().hitpicked
try: try:
scaler = max([self.ct_n1, self.ct_n2]) scaler = max([self.ct_n1, self.ct_n2])
except TypeError: except TypeError:

View File

@@ -59,25 +59,27 @@ class SheetParser(object):
Pulls basic information from the excel sheet Pulls basic information from the excel sheet
""" """
parser = InfoParser(xl=self.xl, submission_type=self.submission_type, sub_object=self.sub_object) parser = InfoParser(xl=self.xl, submission_type=self.submission_type, sub_object=self.sub_object)
info = parser.parse_info() # info = parser.parsed_info
self.info_map = parser.map self.info_map = parser.info_map
# NOTE: in order to accommodate generic submission types we have to check for the type in the excel sheet and # NOTE: in order to accommodate generic submission types we have to check for the type in the excel sheet and rerun accordingly
# rerun accordingly
try: try:
check = info['submission_type']['value'] not in [None, "None", "", " "] check = parser.parsed_info['submission_type']['value'] not in [None, "None", "", " "]
except KeyError: except KeyError as e:
logger.error(f"Couldn't check submission type due to KeyError: {e}")
return return
logger.info( logger.info(
f"Checking for updated submission type: {self.submission_type.name} against new: {info['submission_type']['value']}") f"Checking for updated submission type: {self.submission_type.name} against new: {parser.parsed_info['submission_type']['value']}")
if self.submission_type.name != info['submission_type']['value']: if self.submission_type.name != parser.parsed_info['submission_type']['value']:
if check: if check:
self.submission_type = SubmissionType.query(name=info['submission_type']['value']) # NOTE: If initial submission type doesn't match parsed submission type, defer to parsed submission type.
self.submission_type = SubmissionType.query(name=parser.parsed_info['submission_type']['value'])
logger.info(f"Updated self.submission_type to {self.submission_type}. Rerunning parse.") logger.info(f"Updated self.submission_type to {self.submission_type}. Rerunning parse.")
self.parse_info() self.parse_info()
else: else:
self.submission_type = RSLNamer.retrieve_submission_type(filename=self.filepath) self.submission_type = RSLNamer.retrieve_submission_type(filename=self.filepath)
self.parse_info() self.parse_info()
[self.sub.__setitem__(k, v) for k, v in info.items()] for k, v in parser.parsed_info.items():
self.sub.__setitem__(k, v)
def parse_reagents(self, extraction_kit: str | None = None): def parse_reagents(self, extraction_kit: str | None = None):
""" """
@@ -90,28 +92,28 @@ class SheetParser(object):
extraction_kit = self.sub['extraction_kit'] extraction_kit = self.sub['extraction_kit']
parser = ReagentParser(xl=self.xl, submission_type=self.submission_type, parser = ReagentParser(xl=self.xl, submission_type=self.submission_type,
extraction_kit=extraction_kit) extraction_kit=extraction_kit)
self.sub['reagents'] = parser.parse_reagents() self.sub['reagents'] = parser.parsed_reagents
def parse_samples(self): def parse_samples(self):
""" """
Calls sample parser to pull info from the excel sheet Calls sample parser to pull info from the excel sheet
""" """
parser = SampleParser(xl=self.xl, submission_type=self.submission_type) parser = SampleParser(xl=self.xl, submission_type=self.submission_type)
self.sub['samples'] = parser.parse_samples() self.sub['samples'] = parser.parsed_samples
def parse_equipment(self): def parse_equipment(self):
""" """
Calls equipment parser to pull info from the excel sheet Calls equipment parser to pull info from the excel sheet
""" """
parser = EquipmentParser(xl=self.xl, submission_type=self.submission_type) parser = EquipmentParser(xl=self.xl, submission_type=self.submission_type)
self.sub['equipment'] = parser.parse_equipment() self.sub['equipment'] = parser.parsed_equipment
def parse_tips(self): def parse_tips(self):
""" """
Calls tips parser to pull info from the excel sheet Calls tips parser to pull info from the excel sheet
""" """
parser = TipParser(xl=self.xl, submission_type=self.submission_type) parser = TipParser(xl=self.xl, submission_type=self.submission_type)
self.sub['tips'] = parser.parse_tips() self.sub['tips'] = parser.parsed_tips
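Note that the parsed_* helpers are now properties returning generators: nothing is read from the workbook until self.sub[...] is iterated, each property access builds a fresh generator, and a generator stored in the dict can be consumed only once. A generic illustration (not the parser classes themselves):

def parsed_items():
    # Stand-in for a parser property that lazily yields rows.
    for i in range(3):
        yield {"row": i}

sub = {"samples": parsed_items()}
print(list(sub["samples"]))  # [{'row': 0}, {'row': 1}, {'row': 2}]
print(list(sub["samples"]))  # [] -- the stored generator is already exhausted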
def import_kit_validation_check(self): def import_kit_validation_check(self):
""" """
@@ -156,23 +158,23 @@ class InfoParser(object):
if sub_object is None: if sub_object is None:
sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name) sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name)
self.submission_type_obj = submission_type self.submission_type_obj = submission_type
self.submission_type = dict(value=self.submission_type_obj.name, missing=True)
self.sub_object = sub_object self.sub_object = sub_object
self.map = self.fetch_submission_info_map()
self.xl = xl self.xl = xl
def fetch_submission_info_map(self) -> dict: @property
def info_map(self) -> dict:
""" """
Gets location of basic info from the submission_type object in the database. Gets location of basic info from the submission_type object in the database.
Returns: Returns:
dict: Location map of all info for this submission type dict: Location map of all info for this submission type
""" """
self.submission_type = dict(value=self.submission_type_obj.name, missing=True)
info_map = self.sub_object.construct_info_map(submission_type=self.submission_type_obj, mode="read")
# NOTE: Get the parse_info method from the submission type specified # NOTE: Get the parse_info method from the submission type specified
return info_map return self.sub_object.construct_info_map(submission_type=self.submission_type_obj, mode="read")
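Because info_map (and kit_map below) are plain properties, they re-run the database lookup on every access, including inside the per-sheet loops. If that ever becomes a cost, functools.cached_property is a drop-in way to compute the map once per parser instance; a sketch under that assumption, with ExampleParser standing in for the real class:

from functools import cached_property

class ExampleParser:
    def __init__(self, sub_object, submission_type_obj):
        self.sub_object = sub_object
        self.submission_type_obj = submission_type_obj

    @cached_property
    def info_map(self) -> dict:
        # Computed on first access, then cached on the instance.
        return self.sub_object.construct_info_map(
            submission_type=self.submission_type_obj, mode="read")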
def parse_info(self) -> dict: @property
def parsed_info(self) -> dict:
""" """
Pulls basic info from the excel sheet. Pulls basic info from the excel sheet.
@@ -184,7 +186,7 @@ class InfoParser(object):
for sheet in self.xl.sheetnames: for sheet in self.xl.sheetnames:
ws = self.xl[sheet] ws = self.xl[sheet]
relevant = [] relevant = []
for k, v in self.map.items(): for k, v in self.info_map.items():
# NOTE: If the value is hardcoded put it in the dictionary directly. Ex. Artic kit # NOTE: If the value is hardcoded put it in the dictionary directly. Ex. Artic kit
if k == "custom": if k == "custom":
continue continue
@@ -215,7 +217,7 @@ class InfoParser(object):
case "submitted_date": case "submitted_date":
value, missing = is_missing(value) value, missing = is_missing(value)
# NOTE: is field a JSON? Includes: Extraction info, PCR info, comment, custom # NOTE: is field a JSON? Includes: Extraction info, PCR info, comment, custom
case thing if thing in self.sub_object.jsons(): case thing if thing in self.sub_object.jsons:
value, missing = is_missing(value) value, missing = is_missing(value)
if missing: continue if missing: continue
value = dict(name=f"Parser_{sheet}", text=value, time=datetime.now()) value = dict(name=f"Parser_{sheet}", text=value, time=datetime.now())
@@ -232,7 +234,7 @@ class InfoParser(object):
except (KeyError, IndexError): except (KeyError, IndexError):
continue continue
# NOTE: Return after running the parser components held in submission object. # NOTE: Return after running the parser components held in submission object.
return self.sub_object.custom_info_parser(input_dict=dicto, xl=self.xl, custom_fields=self.map['custom']) return self.sub_object.custom_info_parser(input_dict=dicto, xl=self.xl, custom_fields=self.info_map['custom'])
class ReagentParser(object): class ReagentParser(object):
@@ -252,16 +254,17 @@ class ReagentParser(object):
if isinstance(submission_type, str): if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
self.submission_type_obj = submission_type self.submission_type_obj = submission_type
if not sub_object:
sub_object = submission_type.submission_class
self.sub_object = sub_object self.sub_object = sub_object
if isinstance(extraction_kit, dict): if isinstance(extraction_kit, dict):
extraction_kit = extraction_kit['value'] extraction_kit = extraction_kit['value']
self.kit_object = KitType.query(name=extraction_kit) self.kit_object = KitType.query(name=extraction_kit)
self.map = self.fetch_kit_info_map(submission_type=submission_type) # self.kit_map = self.kit_map(submission_type=submission_type)
logger.debug(f"Setting map: {self.map}")
self.xl = xl self.xl = xl
# @report_result @property
def fetch_kit_info_map(self, submission_type: str | SubmissionType) -> Tuple[Report, dict]: def kit_map(self) -> dict:
""" """
Gets location of kit reagents from database Gets location of kit reagents from database
@@ -271,38 +274,41 @@ class ReagentParser(object):
Returns: Returns:
dict: locations of reagent info for the kit. dict: locations of reagent info for the kit.
""" """
report = Report() # report = Report()
if isinstance(submission_type, dict): # if isinstance(submission_type, dict):
submission_type = submission_type['value'] # submission_type = submission_type['value']
if isinstance(submission_type, str): # if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type) # submission_type = SubmissionType.query(name=submission_type)
reagent_map = {k: v for k, v in self.kit_object.construct_xl_map_for_use(submission_type)} logger.debug("Running kit map")
associations, self.kit_object = self.kit_object.construct_xl_map_for_use(submission_type=self.submission_type_obj)
reagent_map = {k: v for k, v in associations.items() if k != 'info'}
try: try:
del reagent_map['info'] del reagent_map['info']
except KeyError: except KeyError:
pass pass
# NOTE: If reagent map is empty, maybe the wrong kit was given, check if there's only one kit for that submission type and use it if so. # # NOTE: If reagent map is empty, maybe the wrong kit was given, check if there's only one kit for that submission type and use it if so.
if not reagent_map: # if not reagent_map:
temp_kit_object = self.submission_type_obj.get_default_kit() # temp_kit_object = self.submission_type_obj.default_kit
if temp_kit_object: # if temp_kit_object:
self.kit_object = temp_kit_object # self.kit_object = temp_kit_object
logger.warning(f"Attempting to salvage with default kit {self.kit_object} and submission_type: {self.submission_type_obj}") # logger.warning(f"Attempting to salvage with default kit {self.kit_object} and submission_type: {self.submission_type_obj}")
return self.fetch_kit_info_map(submission_type=self.submission_type_obj) # return self.fetch_kit_map(submission_type=self.submission_type_obj)
else: # else:
logger.error(f"Still no reagent map, displaying error.") # logger.error(f"Still no reagent map, displaying error.")
try: # try:
ext_kit_loc = self.submission_type_obj.info_map['extraction_kit']['read'][0] # ext_kit_loc = self.submission_type_obj.info_map['extraction_kit']['read'][0]
location_string = f"Sheet: {ext_kit_loc['sheet']}, Row: {ext_kit_loc['row']}, Column: {ext_kit_loc['column']}?" # location_string = f"Sheet: {ext_kit_loc['sheet']}, Row: {ext_kit_loc['row']}, Column: {ext_kit_loc['column']}?"
except (IndexError, KeyError): # except (IndexError, KeyError):
location_string = "" # location_string = ""
report.add_result(Result(owner=__name__, code=0, # report.add_result(Result(owner=__name__, code=0,
msg=f"No kit map found for {self.kit_object.name}.\n\n" # msg=f"No kit map found for {self.kit_object.name}.\n\n"
f"Are you sure you put the right kit in:\n\n{location_string}?", # f"Are you sure you put the right kit in:\n\n{location_string}?",
status="Critical")) # status="Critical"))
logger.debug(f"Here is the map coming out: {reagent_map}") # logger.debug(f"Here is the map coming out: {reagent_map}")
return reagent_map return reagent_map
def parse_reagents(self) -> Generator[dict, None, None]: @property
def parsed_reagents(self) -> Generator[dict, None, None]:
""" """
Extracts reagent information from the Excel form. Extracts reagent information from the Excel form.
@@ -311,7 +317,7 @@ class ReagentParser(object):
""" """
for sheet in self.xl.sheetnames: for sheet in self.xl.sheetnames:
ws = self.xl[sheet] ws = self.xl[sheet]
relevant = {k.strip(): v for k, v in self.map.items() if sheet in self.map[k]['sheet']} relevant = {k.strip(): v for k, v in self.kit_map.items() if sheet in self.kit_map[k]['sheet']}
if not relevant: if not relevant:
continue continue
for item in relevant: for item in relevant:
@@ -367,11 +373,14 @@ class SampleParser(object):
f"Sample parser attempting to fetch submission class with polymorphic identity: {self.submission_type}") f"Sample parser attempting to fetch submission class with polymorphic identity: {self.submission_type}")
sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type) sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
self.sub_object = sub_object self.sub_object = sub_object
self.sample_info_map = self.fetch_sample_info_map(submission_type=submission_type, sample_map=sample_map) self.sample_type = self.sub_object.get_default_info("sample_type", submission_type=submission_type)
self.plate_map_samples = self.parse_plate_map() self.samp_object = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)
self.lookup_samples = self.parse_lookup_table() # self.sample_map = self.sample_map(submission_type=submission_type, sample_map=sample_map)
# self.plate_map_samples = self.parse_plate_map()
# self.lookup_samples = self.parse_lookup_table()
def fetch_sample_info_map(self, submission_type: str, sample_map: dict | None = None) -> dict: @property
def sample_map(self) -> dict:
""" """
Gets info locations in excel book for submission type. Gets info locations in excel book for submission type.
@@ -381,15 +390,16 @@ class SampleParser(object):
Returns: Returns:
dict: Info locations. dict: Info locations.
""" """
self.sample_type = self.sub_object.get_default_info("sample_type", submission_type=submission_type)
self.samp_object = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)
if sample_map is None:
sample_info_map = self.sub_object.construct_sample_map(submission_type=self.submission_type_obj)
else:
sample_info_map = sample_map
return sample_info_map
def parse_plate_map(self) -> List[dict]: # if sample_map is None:
# sample_info_map = self.sub_object.construct_sample_map(submission_type=self.submission_type_obj)
# else:
# sample_info_map = sample_map
# return sample_info_map
return self.sub_object.construct_sample_map(submission_type=self.submission_type_obj)
@property
def plate_map_samples(self) -> List[dict]:
""" """
Parse sample location/name from plate map Parse sample location/name from plate map
@@ -397,7 +407,7 @@ class SampleParser(object):
List[dict]: List of sample ids and locations. List[dict]: List of sample ids and locations.
""" """
invalids = [0, "0", "EMPTY"] invalids = [0, "0", "EMPTY"]
smap = self.sample_info_map['plate_map'] smap = self.sample_map['plate_map']
ws = self.xl[smap['sheet']] ws = self.xl[smap['sheet']]
plate_map_samples = [] plate_map_samples = []
for ii, row in enumerate(range(smap['start_row'], smap['end_row'] + 1), start=1): for ii, row in enumerate(range(smap['start_row'], smap['end_row'] + 1), start=1):
@@ -414,7 +424,8 @@ class SampleParser(object):
pass pass
return plate_map_samples return plate_map_samples
def parse_lookup_table(self) -> List[dict]: @property
def lookup_samples(self) -> List[dict]:
""" """
Parse misc info from lookup table. Parse misc info from lookup table.
@@ -422,7 +433,7 @@ class SampleParser(object):
List[dict]: List of basic sample info. List[dict]: List of basic sample info.
""" """
lmap = self.sample_info_map['lookup_table'] lmap = self.sample_map['lookup_table']
ws = self.xl[lmap['sheet']] ws = self.xl[lmap['sheet']]
lookup_samples = [] lookup_samples = []
for ii, row in enumerate(range(lmap['start_row'], lmap['end_row'] + 1), start=1): for ii, row in enumerate(range(lmap['start_row'], lmap['end_row'] + 1), start=1):
@@ -441,7 +452,8 @@ class SampleParser(object):
lookup_samples.append(self.samp_object.parse_sample(row_dict)) lookup_samples.append(self.samp_object.parse_sample(row_dict))
return lookup_samples return lookup_samples
def parse_samples(self) -> Generator[dict, None, None]: @property
def parsed_samples(self) -> Generator[dict, None, None]:
""" """
Merges sample info from lookup table and plate map. Merges sample info from lookup table and plate map.
@@ -461,7 +473,7 @@ class SampleParser(object):
pass pass
yield new yield new
else: else:
merge_on_id = self.sample_info_map['lookup_table']['merge_on_id'] merge_on_id = self.sample_map['lookup_table']['merge_on_id']
logger.info(f"Merging sample info using {merge_on_id}") logger.info(f"Merging sample info using {merge_on_id}")
plate_map_samples = sorted(copy(self.plate_map_samples), key=itemgetter('id')) plate_map_samples = sorted(copy(self.plate_map_samples), key=itemgetter('id'))
lookup_samples = sorted(copy(self.lookup_samples), key=itemgetter(merge_on_id)) lookup_samples = sorted(copy(self.lookup_samples), key=itemgetter(merge_on_id))
@@ -507,9 +519,10 @@ class EquipmentParser(object):
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
self.submission_type = submission_type self.submission_type = submission_type
self.xl = xl self.xl = xl
self.map = self.fetch_equipment_map() # self.equipment_map = self.fetch_equipment_map()
def fetch_equipment_map(self) -> dict: @property
def equipment_map(self) -> dict:
""" """
Gets the map of equipment locations in the submission type's spreadsheet Gets the map of equipment locations in the submission type's spreadsheet
@@ -528,14 +541,15 @@ class EquipmentParser(object):
Returns: Returns:
str: asset number str: asset number
""" """
regex = Equipment.get_regex() regex = Equipment.manufacturer_regex
try: try:
return regex.search(input).group().strip("-") return regex.search(input).group().strip("-")
except AttributeError as e: except AttributeError as e:
logger.error(f"Error getting asset number for {input}: {e}") logger.error(f"Error getting asset number for {input}: {e}")
return input return input
def parse_equipment(self) -> Generator[dict, None, None]: @property
def parsed_equipment(self) -> Generator[dict, None, None]:
""" """
Scrapes equipment from xl sheet Scrapes equipment from xl sheet
@@ -545,7 +559,7 @@ class EquipmentParser(object):
for sheet in self.xl.sheetnames: for sheet in self.xl.sheetnames:
ws = self.xl[sheet] ws = self.xl[sheet]
try: try:
relevant = {k: v for k, v in self.map.items() if v['sheet'] == sheet} relevant = {k: v for k, v in self.equipment_map.items() if v['sheet'] == sheet}
except (TypeError, KeyError) as e: except (TypeError, KeyError) as e:
logger.error(f"Error creating relevant equipment list: {e}") logger.error(f"Error creating relevant equipment list: {e}")
continue continue
@@ -566,7 +580,7 @@ class EquipmentParser(object):
nickname=eq.nickname) nickname=eq.nickname)
except AttributeError: except AttributeError:
logger.error(f"Unable to add {eq} to list.") logger.error(f"Unable to add {eq} to list.")
continue
class TipParser(object): class TipParser(object):
""" """
@@ -583,9 +597,10 @@ class TipParser(object):
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
self.submission_type = submission_type self.submission_type = submission_type
self.xl = xl self.xl = xl
self.map = self.fetch_tip_map() # self.map = self.fetch_tip_map()
def fetch_tip_map(self) -> dict: @property
def tip_map(self) -> dict:
""" """
Gets the map of equipment locations in the submission type's spreadsheet Gets the map of equipment locations in the submission type's spreadsheet
@@ -594,7 +609,8 @@ class TipParser(object):
""" """
return {k: v for k, v in self.submission_type.construct_field_map("tip")} return {k: v for k, v in self.submission_type.construct_field_map("tip")}
def parse_tips(self) -> List[dict]: @property
def parsed_tips(self) -> Generator[dict, None, None]:
""" """
Scrapes tips from xl sheet Scrapes tips from xl sheet
@@ -604,7 +620,7 @@ class TipParser(object):
for sheet in self.xl.sheetnames: for sheet in self.xl.sheetnames:
ws = self.xl[sheet] ws = self.xl[sheet]
try: try:
relevant = {k: v for k, v in self.map.items() if v['sheet'] == sheet} relevant = {k: v for k, v in self.tip_map.items() if v['sheet'] == sheet}
except (TypeError, KeyError) as e: except (TypeError, KeyError) as e:
logger.error(f"Error creating relevant equipment list: {e}") logger.error(f"Error creating relevant equipment list: {e}")
continue continue
@@ -653,11 +669,12 @@ class PCRParser(object):
else: else:
self.submission_obj = submission self.submission_obj = submission
rsl_plate_num = self.submission_obj.rsl_plate_num rsl_plate_num = self.submission_obj.rsl_plate_num
self.pcr = self.parse_general() # self.pcr = self.parse_general()
self.samples = self.submission_obj.parse_pcr(xl=self.xl, rsl_plate_num=rsl_plate_num) self.samples = self.submission_obj.parse_pcr(xl=self.xl, rsl_plate_num=rsl_plate_num)
self.controls = self.submission_obj.parse_pcr_controls(xl=self.xl, rsl_plate_num=rsl_plate_num) self.controls = self.submission_obj.parse_pcr_controls(xl=self.xl, rsl_plate_num=rsl_plate_num)
def parse_general(self): @property
def pcr_info(self) -> dict:
""" """
Parse general info rows for all types of PCR results Parse general info rows for all types of PCR results

View File

@@ -1,6 +1,7 @@
''' '''
Contains functions for generating summary reports Contains functions for generating summary reports
''' '''
import sys
from pprint import pformat from pprint import pformat
from pandas import DataFrame, ExcelWriter from pandas import DataFrame, ExcelWriter
import logging import logging
@@ -8,7 +9,7 @@ from pathlib import Path
from datetime import date from datetime import date
from typing import Tuple from typing import Tuple
from backend.db.models import BasicSubmission from backend.db.models import BasicSubmission
from tools import jinja_template_loading, get_first_blank_df_row, row_map from tools import jinja_template_loading, get_first_blank_df_row, row_map, ctx
from PyQt6.QtWidgets import QWidget from PyQt6.QtWidgets import QWidget
from openpyxl.worksheet.worksheet import Worksheet from openpyxl.worksheet.worksheet import Worksheet
@@ -18,6 +19,9 @@ env = jinja_template_loading()
class ReportArchetype(object): class ReportArchetype(object):
"""
Made for children to inherit 'write_report', etc.
"""
def write_report(self, filename: Path | str, obj: QWidget | None = None): def write_report(self, filename: Path | str, obj: QWidget | None = None):
""" """
@@ -168,7 +172,21 @@ class TurnaroundMaker(ReportArchetype):
Returns: Returns:
""" """
days, tat_ok = sub.get_turnaround_time() if 'pytest' not in sys.modules:
from tools import ctx
else:
from test_settings import ctx
days = sub.turnaround_time
try:
tat = sub.get_default_info("turnaround_time")
except (AttributeError, KeyError):
tat = None
if not tat:
tat = ctx.TaT_threshold
try:
tat_ok = days <= tat
except TypeError:
return {}
return dict(name=str(sub.rsl_plate_num), days=days, submitted_date=sub.submitted_date, return dict(name=str(sub.rsl_plate_num), days=days, submitted_date=sub.submitted_date,
completed_date=sub.completed_date, acceptable=tat_ok) completed_date=sub.completed_date, acceptable=tat_ok)
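The TypeError guard is there because days can presumably come back as None (e.g. a submission with no completed date yet), and None <= int raises in Python 3. A tiny illustration with a made-up threshold:

def tat_entry(days, threshold=3):
    try:
        return {"days": days, "acceptable": days <= threshold}
    except TypeError:
        # days is None -> no turnaround to report for this submission.
        return {}

print(tat_entry(2))     # {'days': 2, 'acceptable': True}
print(tat_entry(None))  # {}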
@@ -179,5 +197,3 @@ class ChartReportMaker(ReportArchetype):
self.df = df self.df = df
self.sheet_name = sheet_name self.sheet_name = sheet_name

View File

@@ -45,7 +45,7 @@ class SheetWriter(object):
template = self.submission_type.template_file template = self.submission_type.template_file
if not template: if not template:
logger.error(f"No template file found, falling back to Bacterial Culture") logger.error(f"No template file found, falling back to Bacterial Culture")
template = SubmissionType.retrieve_template_file() template = SubmissionType.basic_template
workbook = load_workbook(BytesIO(template)) workbook = load_workbook(BytesIO(template))
self.xl = workbook self.xl = workbook
self.write_info() self.write_info()
@@ -155,8 +155,11 @@ class InfoWriter(object):
""" """
final_info = {} final_info = {}
for k, v in self.info: for k, v in self.info:
if k == "custom": match k:
case "custom":
continue continue
# case "comment":
# NOTE: merge all comments to fit in single cell. # NOTE: merge all comments to fit in single cell.
if k == "comment" and isinstance(v['value'], list): if k == "comment" and isinstance(v['value'], list):
json_join = [item['text'] for item in v['value'] if 'text' in item.keys()] json_join = [item['text'] for item in v['value'] if 'text' in item.keys()]
@@ -170,6 +173,7 @@ class InfoWriter(object):
for loc in locations: for loc in locations:
sheet = self.xl[loc['sheet']] sheet = self.xl[loc['sheet']]
try: try:
logger.debug(f"Writing {v['value']} to row {loc['row']} and column {loc['column']}")
sheet.cell(row=loc['row'], column=loc['column'], value=v['value']) sheet.cell(row=loc['row'], column=loc['column'], value=v['value'])
except AttributeError as e: except AttributeError as e:
logger.error(f"Can't write {k} to that cell due to AttributeError: {e}") logger.error(f"Can't write {k} to that cell due to AttributeError: {e}")
@@ -196,9 +200,13 @@ class ReagentWriter(object):
self.xl = xl self.xl = xl
if isinstance(submission_type, str): if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
self.submission_type_obj = submission_type
if isinstance(extraction_kit, str): if isinstance(extraction_kit, str):
extraction_kit = KitType.query(name=extraction_kit) extraction_kit = KitType.query(name=extraction_kit)
reagent_map = {k: v for k, v in extraction_kit.construct_xl_map_for_use(submission_type)} self.kit_object = extraction_kit
associations, self.kit_object = self.kit_object.construct_xl_map_for_use(
submission_type=self.submission_type_obj)
reagent_map = {k: v for k, v in associations.items()}
self.reagents = self.reconcile_map(reagent_list=reagent_list, reagent_map=reagent_map) self.reagents = self.reconcile_map(reagent_list=reagent_list, reagent_map=reagent_map)
def reconcile_map(self, reagent_list: List[dict], reagent_map: dict) -> Generator[dict, None, None]: def reconcile_map(self, reagent_list: List[dict], reagent_map: dict) -> Generator[dict, None, None]:
@@ -264,7 +272,7 @@ class SampleWriter(object):
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
self.submission_type = submission_type self.submission_type = submission_type
self.xl = xl self.xl = xl
self.sample_map = submission_type.construct_sample_map()['lookup_table'] self.sample_map = submission_type.sample_map['lookup_table']
# NOTE: exclude any samples without a submission rank. # NOTE: exclude any samples without a submission rank.
samples = [item for item in self.reconcile_map(sample_list) if item['submission_rank'] > 0] samples = [item for item in self.reconcile_map(sample_list) if item['submission_rank'] > 0]
self.samples = sorted(samples, key=itemgetter('submission_rank')) self.samples = sorted(samples, key=itemgetter('submission_rank'))
@@ -282,7 +290,7 @@ class SampleWriter(object):
""" """
multiples = ['row', 'column', 'assoc_id', 'submission_rank'] multiples = ['row', 'column', 'assoc_id', 'submission_rank']
for sample in sample_list: for sample in sample_list:
sample = self.submission_type.get_submission_class().custom_sample_writer(sample) sample = self.submission_type.submission_class.custom_sample_writer(sample)
for assoc in zip(sample['row'], sample['column'], sample['submission_rank']): for assoc in zip(sample['row'], sample['column'], sample['submission_rank']):
new = dict(row=assoc[0], column=assoc[1], submission_rank=assoc[2]) new = dict(row=assoc[0], column=assoc[1], submission_rank=assoc[2])
for k, v in sample.items(): for k, v in sample.items():

View File

@@ -19,21 +19,22 @@ class RSLNamer(object):
Object that will enforce proper formatting on RSL plate names. Object that will enforce proper formatting on RSL plate names.
""" """
def __init__(self, filename: str, sub_type: str | None = None, data: dict | None = None): def __init__(self, filename: str, submission_type: str | None = None, data: dict | None = None):
# NOTE: Preferred method is path retrieval, but might also need validation for just string. # NOTE: Preferred method is path retrieval, but might also need validation for just string.
filename = Path(filename) if Path(filename).exists() else filename filename = Path(filename) if Path(filename).exists() else filename
self.submission_type = sub_type self.submission_type = submission_type
if not self.submission_type: if not self.submission_type:
self.submission_type = self.retrieve_submission_type(filename=filename) self.submission_type = self.retrieve_submission_type(filename=filename)
logger.info(f"got submission type: {self.submission_type}") logger.info(f"got submission type: {self.submission_type}")
if self.submission_type: if self.submission_type:
self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type) self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
self.parsed_name = self.retrieve_rsl_number(filename=filename, regex=self.sub_object.get_regex(submission_type=sub_type)) self.parsed_name = self.retrieve_rsl_number(filename=filename, regex=self.sub_object.get_regex(submission_type=submission_type))
if not data: if not data:
data = dict(submission_type=self.submission_type) data = dict(submission_type=self.submission_type)
if "submission_type" not in data.keys(): if "submission_type" not in data.keys():
data['submission_type'] = self.submission_type data['submission_type'] = self.submission_type
self.parsed_name = self.sub_object.enforce_name(instr=self.parsed_name, data=data) self.parsed_name = self.sub_object.enforce_name(instr=self.parsed_name, data=data)
logger.info(f"Parsed name: {self.parsed_name}")
@classmethod @classmethod
def retrieve_submission_type(cls, filename: str | Path) -> str: def retrieve_submission_type(cls, filename: str | Path) -> str:
@@ -57,7 +58,7 @@ class RSLNamer(object):
categories = wb.properties.category.split(";") categories = wb.properties.category.split(";")
submission_type = next(item.strip().title() for item in categories) submission_type = next(item.strip().title() for item in categories)
except (StopIteration, AttributeError): except (StopIteration, AttributeError):
sts = {item.name: item.get_template_file_sheets() for item in SubmissionType.query() if item.template_file} sts = {item.name: item.template_file_sheets for item in SubmissionType.query() if item.template_file}
try: try:
submission_type = next(k.title() for k,v in sts.items() if wb.sheetnames==v) submission_type = next(k.title() for k,v in sts.items() if wb.sheetnames==v)
except StopIteration: except StopIteration:
@@ -69,7 +70,7 @@ class RSLNamer(object):
def st_from_str(filename:str) -> str: def st_from_str(filename:str) -> str:
if filename.startswith("tmp"): if filename.startswith("tmp"):
return "Bacterial Culture" return "Bacterial Culture"
regex = BasicSubmission.construct_regex() regex = BasicSubmission.regex
m = regex.search(filename) m = regex.search(filename)
try: try:
submission_type = m.lastgroup submission_type = m.lastgroup
@@ -94,14 +95,15 @@ class RSLNamer(object):
raise ValueError("Submission Type came back as None.") raise ValueError("Submission Type came back as None.")
from frontend.widgets import ObjectSelector from frontend.widgets import ObjectSelector
dlg = ObjectSelector(title="Couldn't parse submission type.", dlg = ObjectSelector(title="Couldn't parse submission type.",
message="Please select submission type from list below.", obj_type=SubmissionType) message="Please select submission type from list below.",
obj_type=SubmissionType)
if dlg.exec(): if dlg.exec():
submission_type = dlg.parse_form() submission_type = dlg.parse_form()
submission_type = submission_type.replace("_", " ") submission_type = submission_type.replace("_", " ")
return submission_type return submission_type
@classmethod @classmethod
def retrieve_rsl_number(cls, filename: str | Path, regex: str | None = None): def retrieve_rsl_number(cls, filename: str | Path, regex: re.Pattern | None = None):
""" """
Uses regex to retrieve the plate number and submission type from an input string Uses regex to retrieve the plate number and submission type from an input string
@@ -110,12 +112,7 @@ class RSLNamer(object):
filename (str): string to be parsed filename (str): string to be parsed
""" """
if regex is None: if regex is None:
regex = BasicSubmission.construct_regex() regex = BasicSubmission.regex
else:
try:
regex = re.compile(rf'{regex}', re.IGNORECASE | re.VERBOSE)
except re.error as e:
regex = BasicSubmission.construct_regex()
match filename: match filename:
case Path(): case Path():
m = regex.search(filename.stem) m = regex.search(filename.stem)
@@ -135,7 +132,7 @@ class RSLNamer(object):
@classmethod @classmethod
def construct_new_plate_name(cls, data: dict) -> str: def construct_new_plate_name(cls, data: dict) -> str:
""" """
Make a brand new plate name from submission data. Make a brand-new plate name from submission data.
Args: Args:
data (dict): incoming submission data data (dict): incoming submission data
@@ -179,7 +176,13 @@ class RSLNamer(object):
template = environment.from_string(template) template = environment.from_string(template)
return template.render(**kwargs) return template.render(**kwargs)
def calculate_repeat(self): def calculate_repeat(self) -> str:
"""
Determines what repeat number this plate is.
Returns:
str: Repeat number.
"""
regex = re.compile(r"-\d(?P<repeat>R\d)") regex = re.compile(r"-\d(?P<repeat>R\d)")
m = regex.search(self.parsed_name) m = regex.search(self.parsed_name)
if m is not None: if m is not None:

View File

@@ -73,7 +73,7 @@ class PydReagent(BaseModel):
if value is not None: if value is not None:
match value: match value:
case int(): case int():
return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value - 2).date() return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value - 2)
case 'NA': case 'NA':
return value return value
case str(): case str():
@@ -117,7 +117,8 @@ class PydReagent(BaseModel):
fields = list(self.model_fields.keys()) + extras fields = list(self.model_fields.keys()) + extras
return {k: getattr(self, k) for k in fields} return {k: getattr(self, k) for k in fields}
def toSQL(self, submission: BasicSubmission | str = None) -> Tuple[Reagent, Report]: @report_result
def to_sql(self, submission: BasicSubmission | str = None) -> Tuple[Reagent, Report]:
""" """
Converts this instance into a backend.db.models.kit.Reagent instance Converts this instance into a backend.db.models.kit.Reagent instance
@@ -128,6 +129,7 @@ class PydReagent(BaseModel):
if self.model_extra is not None: if self.model_extra is not None:
self.__dict__.update(self.model_extra) self.__dict__.update(self.model_extra)
reagent = Reagent.query(lot=self.lot, name=self.name) reagent = Reagent.query(lot=self.lot, name=self.name)
logger.debug(f"Reagent: {reagent}")
if reagent is None: if reagent is None:
reagent = Reagent() reagent = Reagent()
for key, value in self.__dict__.items(): for key, value in self.__dict__.items():
@@ -140,7 +142,6 @@ class PydReagent(BaseModel):
assoc.comments = self.comment assoc.comments = self.comment
else: else:
assoc = None assoc = None
report.add_result(Result(owner=__name__, code=0, msg="New reagent created.", status="Information"))
else: else:
if submission is not None and reagent not in submission.reagents: if submission is not None and reagent not in submission.reagents:
submission.update_reagentassoc(reagent=reagent, role=self.role) submission.update_reagentassoc(reagent=reagent, role=self.role)
@@ -160,7 +161,7 @@ class PydSample(BaseModel, extra='allow'):
def validate_model(cls, data): def validate_model(cls, data):
model = BasicSample.find_polymorphic_subclass(polymorphic_identity=data.sample_type) model = BasicSample.find_polymorphic_subclass(polymorphic_identity=data.sample_type)
for k, v in data.model_extra.items(): for k, v in data.model_extra.items():
if k in model.timestamps(): if k in model.timestamps:
if isinstance(v, str): if isinstance(v, str):
v = datetime.strptime(v, "%Y-%m-%d") v = datetime.strptime(v, "%Y-%m-%d")
data.__setattr__(k, v) data.__setattr__(k, v)
@@ -202,7 +203,7 @@ class PydSample(BaseModel, extra='allow'):
fields = list(self.model_fields.keys()) + list(self.model_extra.keys()) fields = list(self.model_fields.keys()) + list(self.model_extra.keys())
return {k: getattr(self, k) for k in fields} return {k: getattr(self, k) for k in fields}
def toSQL(self, submission: BasicSubmission | str = None) -> Tuple[ def to_sql(self, submission: BasicSubmission | str = None) -> Tuple[
BasicSample, List[SubmissionSampleAssociation], Result | None]: BasicSample, List[SubmissionSampleAssociation], Result | None]:
""" """
Converts this instance into a backend.db.models.submissions.Sample object Converts this instance into a backend.db.models.submissions.Sample object
@@ -271,7 +272,7 @@ class PydTips(BaseModel):
def to_sql(self, submission: BasicSubmission) -> SubmissionTipsAssociation: def to_sql(self, submission: BasicSubmission) -> SubmissionTipsAssociation:
""" """
Con Convert this object to the SQL version for database storage.
Args: Args:
submission (BasicSubmission): A submission object to associate tips represented here. submission (BasicSubmission): A submission object to associate tips represented here.
@@ -280,10 +281,10 @@ class PydTips(BaseModel):
SubmissionTipsAssociation: Association between queried tips and submission SubmissionTipsAssociation: Association between queried tips and submission
""" """
tips = Tips.query(name=self.name, limit=1) tips = Tips.query(name=self.name, limit=1)
logger.debug(f"Tips query has yielded: {tips}") # logger.debug(f"Tips query has yielded: {tips}")
assoc = SubmissionTipsAssociation.query(tip_id=tips.id, submission_id=submission.id, role=self.role, limit=1) assoc = SubmissionTipsAssociation.query_or_create(tips=tips, submission=submission, role=self.role, limit=1)
if assoc is None: # if assoc is None:
assoc = SubmissionTipsAssociation(submission=submission, tips=tips, role_name=self.role) # assoc = SubmissionTipsAssociation(submission=submission, tips=tips, role_name=self.role)
return assoc return assoc
@@ -316,7 +317,7 @@ class PydEquipment(BaseModel, extra='ignore'):
pass pass
return value return value
def toSQL(self, submission: BasicSubmission | str = None) -> Tuple[Equipment, SubmissionEquipmentAssociation]: def to_sql(self, submission: BasicSubmission | str = None, extraction_kit: KitType | str = None) -> Tuple[Equipment, SubmissionEquipmentAssociation]:
""" """
Creates Equipment and SubmissionEquipmentAssociations for this PydEquipment Creates Equipment and SubmissionEquipmentAssociations for this PydEquipment
@@ -328,6 +329,8 @@ class PydEquipment(BaseModel, extra='ignore'):
""" """
if isinstance(submission, str): if isinstance(submission, str):
submission = BasicSubmission.query(rsl_plate_num=submission) submission = BasicSubmission.query(rsl_plate_num=submission)
if isinstance(extraction_kit, str):
extraction_kit = KitType.query(name=extraction_kit)
equipment = Equipment.query(asset_number=self.asset_number) equipment = Equipment.query(asset_number=self.asset_number)
if equipment is None: if equipment is None:
logger.error("No equipment found. Returning None.") logger.error("No equipment found. Returning None.")
@@ -343,6 +346,11 @@ class PydEquipment(BaseModel, extra='ignore'):
if assoc is None: if assoc is None:
assoc = SubmissionEquipmentAssociation(submission=submission, equipment=equipment) assoc = SubmissionEquipmentAssociation(submission=submission, equipment=equipment)
# TODO: This seems precarious. What if there is more than one process? # TODO: This seems precarious. What if there is more than one process?
# NOTE: Given the way processes are fetched in the SQL model, this shouldn't be a problem, but I'll include a failsafe.
# NOTE: I need to find a way to filter this by the kit involved.
if len(self.processes) > 1:
process = Process.query(submission_type=submission.get_submission_type(), extraction_kit=extraction_kit, equipment_role=self.role)
else:
process = Process.query(name=self.processes[0]) process = Process.query(name=self.processes[0])
if process is None: if process is None:
logger.error(f"Found unknown process: {process}.") logger.error(f"Found unknown process: {process}.")
@@ -405,10 +413,12 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator('equipment', mode='before') @field_validator('equipment', mode='before')
@classmethod @classmethod
def convert_equipment_dict(cls, value): def convert_equipment_dict(cls, value):
if isinstance(value, Generator):
return [PydEquipment(**equipment) for equipment in value]
if isinstance(value, dict): if isinstance(value, dict):
return value['value'] return value['value']
if isinstance(value, Generator):
return [PydEquipment(**equipment) for equipment in value]
if not value:
return []
return value return value
@field_validator('comment', mode='before') @field_validator('comment', mode='before')
@@ -443,12 +453,11 @@ class PydSubmission(BaseModel, extra='allow'):
def strip_datetime_string(cls, value): def strip_datetime_string(cls, value):
match value['value']: match value['value']:
case date(): case date():
return value output = datetime.combine(value['value'], datetime.min.time())
case datetime(): case datetime():
return value.date() output = value['value']
case int(): case int():
return dict(value=datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value['value'] - 2).date(), output = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value['value'] - 2)
missing=True)
case str(): case str():
string = re.sub(r"(_|-)\d(R\d)?$", "", value['value']) string = re.sub(r"(_|-)\d(R\d)?$", "", value['value'])
try: try:
@@ -456,12 +465,15 @@ class PydSubmission(BaseModel, extra='allow'):
except ParserError as e: except ParserError as e:
logger.error(f"Problem parsing date: {e}") logger.error(f"Problem parsing date: {e}")
try: try:
output = dict(value=parse(string.replace("-", "")).date(), missing=True) output = parse(string.replace("-", "")).date()
except Exception as e: except Exception as e:
logger.error(f"Problem with parse fallback: {e}") logger.error(f"Problem with parse fallback: {e}")
return output return value
case _: case _:
raise ValueError(f"Could not get datetime from {value['value']}") raise ValueError(f"Could not get datetime from {value['value']}")
value['value'] = output.replace(tzinfo=timezone)
return value
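This validator and PydReagent's expiry validator above decode Excel serial dates with the same formula: Excel counts 1900-01-01 as day 1 and also believes 1900-02-29 existed, so mapping a modern serial onto Python's proleptic ordinal needs the "- 2". A quick check of the arithmetic:

from datetime import datetime

def from_excel_serial(value: int) -> datetime:
    # Same expression as the validators above.
    return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value - 2)

print(from_excel_serial(45292))  # 2024-01-01 00:00:00 (45292 is Excel's serial for 2024-01-01)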
@field_validator("submitting_lab", mode="before") @field_validator("submitting_lab", mode="before")
@classmethod @classmethod
@@ -511,7 +523,7 @@ class PydSubmission(BaseModel, extra='allow'):
if "pytest" in sys.modules and sub_type.replace(" ", "") == "BasicSubmission": if "pytest" in sys.modules and sub_type.replace(" ", "") == "BasicSubmission":
output = "RSL-BS-Test001" output = "RSL-BS-Test001"
else: else:
output = RSLNamer(filename=values.data['filepath'].__str__(), sub_type=sub_type, output = RSLNamer(filename=values.data['filepath'].__str__(), submission_type=sub_type,
data=values.data).parsed_name data=values.data).parsed_name
return dict(value=output, missing=True) return dict(value=output, missing=True)
@@ -653,9 +665,9 @@ class PydSubmission(BaseModel, extra='allow'):
return value return value
if isinstance(contact, tuple): if isinstance(contact, tuple):
contact = contact[0] contact = contact[0]
value = dict(value=f"Defaulted to: {contact}", missing=True) value = dict(value=f"Defaulted to: {contact}", missing=False)
logger.debug(f"Value after query: {value}") logger.debug(f"Value after query: {value}")
return return value
else: else:
logger.debug(f"Value after bypass check: {value}") logger.debug(f"Value after bypass check: {value}")
return value return value
@@ -665,7 +677,7 @@ class PydSubmission(BaseModel, extra='allow'):
# NOTE: this could also be done with default_factory # NOTE: this could also be done with default_factory
self.submission_object = BasicSubmission.find_polymorphic_subclass( self.submission_object = BasicSubmission.find_polymorphic_subclass(
polymorphic_identity=self.submission_type['value']) polymorphic_identity=self.submission_type['value'])
self.namer = RSLNamer(self.rsl_plate_num['value'], sub_type=self.submission_type['value']) self.namer = RSLNamer(self.rsl_plate_num['value'], submission_type=self.submission_type['value'])
if run_custom: if run_custom:
self.submission_object.custom_validation(pyd=self) self.submission_object.custom_validation(pyd=self)
@@ -777,10 +789,10 @@ class PydSubmission(BaseModel, extra='allow'):
match key: match key:
case "reagents": case "reagents":
for reagent in self.reagents: for reagent in self.reagents:
reagent, _ = reagent.toSQL(submission=instance) reagent = reagent.to_sql(submission=instance)
case "samples": case "samples":
for sample in self.samples: for sample in self.samples:
sample, associations, _ = sample.toSQL(submission=instance) sample, associations, _ = sample.to_sql(submission=instance)
for assoc in associations: for assoc in associations:
if assoc is not None: if assoc is not None:
if assoc not in instance.submission_sample_associations: if assoc not in instance.submission_sample_associations:
@@ -791,7 +803,7 @@ class PydSubmission(BaseModel, extra='allow'):
for equip in self.equipment: for equip in self.equipment:
if equip is None: if equip is None:
continue continue
equip, association = equip.toSQL(submission=instance) equip, association = equip.to_sql(submission=instance, extraction_kit=self.extraction_kit)
if association is not None: if association is not None:
instance.submission_equipment_associations.append(association) instance.submission_equipment_associations.append(association)
case "tips": case "tips":
@@ -807,7 +819,7 @@ class PydSubmission(BaseModel, extra='allow'):
instance.submission_tips_associations.append(association) instance.submission_tips_associations.append(association)
else: else:
logger.warning(f"Tips association {association} is already present in {instance}") logger.warning(f"Tips association {association} is already present in {instance}")
case item if item in instance.timestamps(): case item if item in instance.timestamps:
logger.warning(f"Incoming timestamp key: {item}, with value: {value}") logger.warning(f"Incoming timestamp key: {item}, with value: {value}")
if isinstance(value, date): if isinstance(value, date):
value = datetime.combine(value, datetime.now().time()) value = datetime.combine(value, datetime.now().time())
@@ -818,7 +830,7 @@ class PydSubmission(BaseModel, extra='allow'):
else: else:
value = value value = value
instance.set_attribute(key=key, value=value) instance.set_attribute(key=key, value=value)
case item if item in instance.jsons(): case item if item in instance.jsons:
try: try:
ii = value.items() ii = value.items()
except AttributeError: except AttributeError:
@@ -989,7 +1001,7 @@ class PydContact(BaseModel):
logger.debug(f"Output phone: {value}") logger.debug(f"Output phone: {value}")
return value return value
def toSQL(self) -> Tuple[Contact, Report]: def to_sql(self) -> Tuple[Contact, Report]:
""" """
Converts this instance into a backend.db.models.organization. Contact instance. Converts this instance into a backend.db.models.organization. Contact instance.
Does not query for existing contacts. Does not query for existing contacts.
@@ -1024,7 +1036,7 @@ class PydOrganization(BaseModel):
cost_centre: str cost_centre: str
contacts: List[PydContact] | None contacts: List[PydContact] | None
def toSQL(self) -> Organization: def to_sql(self) -> Organization:
""" """
Converts this instance into a backend.db.models.organization.Organization instance. Converts this instance into a backend.db.models.organization.Organization instance.
@@ -1055,7 +1067,7 @@ class PydReagentRole(BaseModel):
return timedelta(days=value) return timedelta(days=value)
return value return value
def toSQL(self, kit: KitType) -> ReagentRole: def to_sql(self, kit: KitType) -> ReagentRole:
""" """
Converts this instance into a backend.db.models.ReagentType instance Converts this instance into a backend.db.models.ReagentType instance
@@ -1082,7 +1094,7 @@ class PydKit(BaseModel):
name: str name: str
reagent_roles: List[PydReagentRole] = [] reagent_roles: List[PydReagentRole] = []
def toSQL(self) -> Tuple[KitType, Report]: def to_sql(self) -> Tuple[KitType, Report]:
""" """
Converts this instance into a backend.db.models.kits.KitType instance Converts this instance into a backend.db.models.kits.KitType instance
@@ -1093,7 +1105,7 @@ class PydKit(BaseModel):
instance = KitType.query(name=self.name) instance = KitType.query(name=self.name)
if instance is None: if instance is None:
instance = KitType(name=self.name) instance = KitType(name=self.name)
[item.toSQL(instance) for item in self.reagent_roles] [item.to_sql(instance) for item in self.reagent_roles]
return instance, report return instance, report

View File

@@ -193,7 +193,7 @@ class App(QMainWindow):
@check_authorization @check_authorization
def edit_reagent(self, *args, **kwargs): def edit_reagent(self, *args, **kwargs):
dlg = SearchBox(parent=self, object_type=Reagent, extras=['role']) dlg = SearchBox(parent=self, object_type=Reagent, extras=[dict(name='Role', field="role")])
dlg.exec() dlg.exec()
@check_authorization @check_authorization

View File

@@ -30,8 +30,7 @@ class ControlsViewer(InfoPane):
self.control_sub_typer.addItems(con_sub_types) self.control_sub_typer.addItems(con_sub_types)
# NOTE: create custom widget to get types of analysis -- disabled by PCR control # NOTE: create custom widget to get types of analysis -- disabled by PCR control
self.mode_typer = QComboBox() self.mode_typer = QComboBox()
mode_types = IridaControl.get_modes() self.mode_typer.addItems(IridaControl.modes)
self.mode_typer.addItems(mode_types)
# NOTE: create custom widget to get subtypes of analysis -- disabled by PCR control # NOTE: create custom widget to get subtypes of analysis -- disabled by PCR control
self.mode_sub_typer = QComboBox() self.mode_sub_typer = QComboBox()
self.mode_sub_typer.setEnabled(False) self.mode_sub_typer.setEnabled(False)
@@ -43,7 +42,7 @@ class ControlsViewer(InfoPane):
self.layout.addWidget(self.control_sub_typer, 1, 0, 1, 4) self.layout.addWidget(self.control_sub_typer, 1, 0, 1, 4)
self.layout.addWidget(self.mode_typer, 2, 0, 1, 4) self.layout.addWidget(self.mode_typer, 2, 0, 1, 4)
self.layout.addWidget(self.mode_sub_typer, 3, 0, 1, 4) self.layout.addWidget(self.mode_sub_typer, 3, 0, 1, 4)
self.archetype.get_instance_class().make_parent_buttons(parent=self) self.archetype.instance_class.make_parent_buttons(parent=self)
self.update_data() self.update_data()
self.control_sub_typer.currentIndexChanged.connect(self.update_data) self.control_sub_typer.currentIndexChanged.connect(self.update_data)
self.mode_typer.currentIndexChanged.connect(self.update_data) self.mode_typer.currentIndexChanged.connect(self.update_data)
@@ -70,7 +69,7 @@ class ControlsViewer(InfoPane):
except AttributeError: except AttributeError:
sub_types = [] sub_types = []
# NOTE: added in allowed to have subtypes in case additions made in future. # NOTE: added in allowed to have subtypes in case additions made in future.
if sub_types and self.mode.lower() in self.archetype.get_instance_class().subtyping_allowed: if sub_types and self.mode.lower() in self.archetype.instance_class.subtyping_allowed:
# NOTE: block signal that will rerun controls getter and update mode_sub_typer # NOTE: block signal that will rerun controls getter and update mode_sub_typer
with QSignalBlocker(self.mode_sub_typer) as blocker: with QSignalBlocker(self.mode_sub_typer) as blocker:
self.mode_sub_typer.addItems(sub_types) self.mode_sub_typer.addItems(sub_types)
@@ -103,7 +102,7 @@ class ControlsViewer(InfoPane):
chart_settings = dict(sub_type=self.con_sub_type, start_date=self.start_date, end_date=self.end_date, chart_settings = dict(sub_type=self.con_sub_type, start_date=self.start_date, end_date=self.end_date,
mode=self.mode, mode=self.mode,
sub_mode=self.mode_sub_type, parent=self, months=months) sub_mode=self.mode_sub_type, parent=self, months=months)
self.fig = self.archetype.get_instance_class().make_chart(chart_settings=chart_settings, parent=self, ctx=self.app.ctx) self.fig = self.archetype.instance_class.make_chart(chart_settings=chart_settings, parent=self, ctx=self.app.ctx)
self.report_obj = ChartReportMaker(df=self.fig.df, sheet_name=self.archetype.name) self.report_obj = ChartReportMaker(df=self.fig.df, sheet_name=self.archetype.name)
if issubclass(self.fig.__class__, CustomFigure): if issubclass(self.fig.__class__, CustomFigure):
self.save_button.setEnabled(True) self.save_button.setEnabled(True)

View File

@@ -19,7 +19,7 @@ class EquipmentUsage(QDialog):
super().__init__(parent) super().__init__(parent)
self.submission = submission self.submission = submission
self.setWindowTitle(f"Equipment Checklist - {submission.rsl_plate_num}") self.setWindowTitle(f"Equipment Checklist - {submission.rsl_plate_num}")
self.used_equipment = self.submission.get_used_equipment() self.used_equipment = self.submission.used_equipment
self.kit = self.submission.extraction_kit self.kit = self.submission.extraction_kit
self.opt_equipment = submission.submission_type.get_equipment() self.opt_equipment = submission.submission_type.get_equipment()
self.layout = QVBoxLayout() self.layout = QVBoxLayout()

View File

@@ -65,11 +65,11 @@ class AddEdit(QDialog):
report = Report() report = Report()
parsed = {result[0].strip(":"): result[1] for result in [item.parse_form() for item in self.findChildren(EditProperty)] if result[0]} parsed = {result[0].strip(":"): result[1] for result in [item.parse_form() for item in self.findChildren(EditProperty)] if result[0]}
logger.debug(parsed) logger.debug(parsed)
model = self.object_type.get_pydantic_model() model = self.object_type.pydantic_model
# NOTE: Hand-off to pydantic model for validation. # NOTE: Hand-off to pydantic model for validation.
# NOTE: Also, why am I not just using the to_sql method here? I could write one for contacts. # NOTE: Also, why am I not just using the to_sql method here? I could write one for contacts.
model = model(**parsed) model = model(**parsed)
# output, result = model.toSQL() # output, result = model.to_sql()
# report.add_result(result) # report.add_result(result)
# if len(report.results) < 1: # if len(report.results) < 1:
# report.add_result(Result(msg="Added new regeant.", icon="Information", owner=__name__)) # report.add_result(Result(msg="Added new regeant.", icon="Information", owner=__name__))
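Editor's note: the hand-off above is parse_form -> pydantic model (validation happens at construction) -> to_sql -> save. A rough sketch under those assumptions; the model and field names here are invented for illustration:

from pydantic import BaseModel

class PydContact(BaseModel):
    # Illustrative stand-in; the real class comes from the pydantic_model property.
    name: str
    email: str

parsed = {"name": "A. Tech", "email": "tech@example.com"}  # what parse_form() would hand over
model = PydContact(**parsed)   # pydantic validates at construction time
# sqlobj = model.to_sql()      # then conversion to the SQLAlchemy object, as elsewhere in this commit
# sqlobj.save()
print(model)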

View File

@@ -188,7 +188,7 @@ class EditRelationship(QWidget):
dlg = AddEdit(self, instance=instance, manager=self.parent().object_type.__name__.lower()) dlg = AddEdit(self, instance=instance, manager=self.parent().object_type.__name__.lower())
if dlg.exec(): if dlg.exec():
new_instance = dlg.parse_form() new_instance = dlg.parse_form()
new_instance, result = new_instance.toSQL() new_instance, result = new_instance.to_sql()
logger.debug(f"New instance: {new_instance}") logger.debug(f"New instance: {new_instance}")
addition = getattr(self.parent().instance, self.objectName()) addition = getattr(self.parent().instance, self.objectName())
if isinstance(addition, InstrumentedList): if isinstance(addition, InstrumentedList):
@@ -213,7 +213,7 @@ class EditRelationship(QWidget):
sets data in model sets data in model
""" """
# logger.debug(self.data) # logger.debug(self.data)
self.data = DataFrame.from_records([item.to_omnigui_dict() for item in self.data]) self.data = DataFrame.from_records([item.omnigui_dict for item in self.data])
try: try:
self.columns_of_interest = [dict(name=item, column=self.data.columns.get_loc(item)) for item in self.extras] self.columns_of_interest = [dict(name=item, column=self.data.columns.get_loc(item)) for item in self.extras]
except (KeyError, AttributeError): except (KeyError, AttributeError):
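Editor's note: omnigui_dict only needs to yield a flat mapping per object for DataFrame.from_records to work. A small illustration with invented records and the same columns_of_interest construction as the hunk above:

from pandas import DataFrame

# Hypothetical omnigui_dict-style records; real ones come from the objects themselves.
records = [
    {"id": 1, "name": "Reagent A", "role": "lysis buffer"},
    {"id": 2, "name": "Reagent B", "role": "wash buffer"},
]
data = DataFrame.from_records(records)
extras = ["role"]
# Mirrors the columns_of_interest construction in the hunk above.
columns_of_interest = [dict(name=item, column=data.columns.get_loc(item)) for item in extras]
print(columns_of_interest)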

View File

@@ -20,7 +20,7 @@ class SearchBox(QDialog):
The full search widget. The full search widget.
""" """
def __init__(self, parent, object_type: Any, extras: List[str], returnable: bool = False, **kwargs): def __init__(self, parent, object_type: Any, extras: List[dict], returnable: bool = False, **kwargs):
super().__init__(parent) super().__init__(parent)
self.object_type = self.original_type = object_type self.object_type = self.original_type = object_type
self.extras = extras self.extras = extras
@@ -73,8 +73,9 @@ class SearchBox(QDialog):
except AttributeError: except AttributeError:
search_fields = [] search_fields = []
for iii, searchable in enumerate(search_fields): for iii, searchable in enumerate(search_fields):
widget = FieldSearch(parent=self, label=searchable, field_name=searchable) widget = FieldSearch(parent=self, label=searchable['label'], field_name=searchable['field'])
widget.setObjectName(searchable) # widget = FieldSearch(parent=self, label=k, field_name=v)
widget.setObjectName(searchable['field'])
self.layout.addWidget(widget, 1 + iii, 0) self.layout.addWidget(widget, 1 + iii, 0)
widget.search_widget.textChanged.connect(self.update_data) widget.search_widget.textChanged.connect(self.update_data)
self.update_data() self.update_data()
@@ -150,14 +151,16 @@ class SearchResults(QTableView):
self.extras = extras + self.object_type.searchables self.extras = extras + self.object_type.searchables
except AttributeError: except AttributeError:
self.extras = extras self.extras = extras
logger.debug(f"Extras: {self.extras}")
def setData(self, df: DataFrame) -> None: def setData(self, df: DataFrame) -> None:
""" """
sets data in model sets data in model
""" """
self.data = df self.data = df
try: try:
self.columns_of_interest = [dict(name=item, column=self.data.columns.get_loc(item)) for item in self.extras] self.columns_of_interest = [dict(name=item['field'], column=self.data.columns.get_loc(item['field'])) for item in self.extras]
except KeyError: except KeyError:
self.columns_of_interest = [] self.columns_of_interest = []
try: try:
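Editor's note: extras now travel as dicts rather than bare strings, with 'label' used for display and 'field' used as the widget object name and DataFrame column. A sketch of that contract with FieldSearch stubbed out (the real widget is a Qt class):

from typing import List

class FieldSearch:
    """Stand-in for the real QWidget; only the constructor contract matters here."""

    def __init__(self, parent=None, label: str = "", field_name: str = ""):
        self.label = label
        self.field_name = field_name

def build_search_widgets(search_fields: List[dict]) -> List[FieldSearch]:
    widgets = []
    for searchable in search_fields:
        # Same keys the loop above reads: 'label' for display, 'field' for the object name.
        widgets.append(FieldSearch(label=searchable["label"], field_name=searchable["field"]))
    return widgets

print([w.field_name for w in build_search_widgets([dict(label="Role", field="role")])])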

View File

@@ -93,7 +93,7 @@ class SubmissionDetails(QDialog):
base_dict = sample.to_sub_dict(full_data=True) base_dict = sample.to_sub_dict(full_data=True)
exclude = ['submissions', 'excluded', 'colour', 'tooltip'] exclude = ['submissions', 'excluded', 'colour', 'tooltip']
base_dict['excluded'] = exclude base_dict['excluded'] = exclude
template = sample.get_details_template() template = sample.details_template
template_path = Path(template.environment.loader.__getattribute__("searchpath")[0]) template_path = Path(template.environment.loader.__getattribute__("searchpath")[0])
with open(template_path.joinpath("css", "styles.css"), "r") as f: with open(template_path.joinpath("css", "styles.css"), "r") as f:
css = f.read() css = f.read()
@@ -147,7 +147,7 @@ class SubmissionDetails(QDialog):
self.rsl_plate_num = submission.rsl_plate_num self.rsl_plate_num = submission.rsl_plate_num
self.base_dict = submission.to_dict(full_data=True) self.base_dict = submission.to_dict(full_data=True)
# NOTE: don't want id # NOTE: don't want id
self.base_dict['platemap'] = submission.make_plate_map(sample_list=submission.hitpick_plate()) self.base_dict['platemap'] = submission.make_plate_map(sample_list=submission.hitpicked)
self.base_dict['excluded'] = submission.get_default_info("details_ignore") self.base_dict['excluded'] = submission.get_default_info("details_ignore")
self.base_dict, self.template = submission.get_details_template(base_dict=self.base_dict) self.base_dict, self.template = submission.get_details_template(base_dict=self.base_dict)
template_path = Path(self.template.environment.loader.__getattribute__("searchpath")[0]) template_path = Path(self.template.environment.loader.__getattribute__("searchpath")[0])
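Editor's note: the loader lookup above (environment.loader.__getattribute__("searchpath")[0]) works, but plain attribute access does the same thing. A sketch, assuming a jinja2 FileSystemLoader:

from pathlib import Path
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("templates"))  # hypothetical search path
# Same value as env.loader.__getattribute__("searchpath")[0], via plain attribute access.
template_path = Path(env.loader.searchpath[0])
print(template_path)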

View File

@@ -147,14 +147,16 @@ class SubmissionFormContainer(QWidget):
instance = Reagent() instance = Reagent()
dlg = AddEdit(parent=self, instance=instance) dlg = AddEdit(parent=self, instance=instance)
if dlg.exec(): if dlg.exec():
reagent, result = dlg.parse_form() reagent = dlg.parse_form()
reagent.missing = False reagent.missing = False
logger.debug(f"Reagent: {reagent}, result: {result}") # logger.debug(f"Reagent: {reagent}, result: {result}")
report.add_result(result) # report.add_result(result)
# NOTE: send reagent to db # NOTE: send reagent to db
sqlobj, result = reagent.toSQL() sqlobj = reagent.to_sql()
sqlobj.save() sqlobj.save()
report.add_result(result) logger.debug("Reagent added!")
report.add_result(Result(owner=__name__, code=0, msg="New reagent created.", status="Information"))
# report.add_result(result)
return reagent, report return reagent, report
@report_result @report_result
@@ -184,10 +186,10 @@ class SubmissionFormContainer(QWidget):
# NOTE: create reagent object # NOTE: create reagent object
reagent = PydReagent(ctx=self.app.ctx, **info, missing=False) reagent = PydReagent(ctx=self.app.ctx, **info, missing=False)
# NOTE: send reagent to db # NOTE: send reagent to db
sqlobj, result = reagent.toSQL() sqlobj = reagent.to_sql()
sqlobj.save() sqlobj.save()
report.add_result(result) # report.add_result(result)
return reagent, report return reagent
class SubmissionFormWidget(QWidget): class SubmissionFormWidget(QWidget):
@@ -201,7 +203,7 @@ class SubmissionFormWidget(QWidget):
self.pyd = submission self.pyd = submission
self.missing_info = [] self.missing_info = []
self.submission_type = SubmissionType.query(name=self.pyd.submission_type['value']) self.submission_type = SubmissionType.query(name=self.pyd.submission_type['value'])
st = self.submission_type.get_submission_class() st = self.submission_type.submission_class
defaults = st.get_default_info("form_recover", "form_ignore", submission_type=self.pyd.submission_type['value']) defaults = st.get_default_info("form_recover", "form_ignore", submission_type=self.pyd.submission_type['value'])
self.recover = defaults['form_recover'] self.recover = defaults['form_recover']
self.ignore = defaults['form_ignore'] self.ignore = defaults['form_ignore']
@@ -443,6 +445,9 @@ class SubmissionFormWidget(QWidget):
reagent = widget.parse_form() reagent = widget.parse_form()
if reagent is not None: if reagent is not None:
reagents.append(reagent) reagents.append(reagent)
else:
report.add_result(Result(msg="Failed integrity check", status="Critical"))
return report
case self.InfoItem(): case self.InfoItem():
field, value = widget.parse_form() field, value = widget.parse_form()
if field is not None: if field is not None:
@@ -523,7 +528,7 @@ class SubmissionFormWidget(QWidget):
if isinstance(submission_type, str): if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
if sub_obj is None: if sub_obj is None:
sub_obj = submission_type.get_submission_class() sub_obj = submission_type.submission_class
try: try:
value = value['value'] value = value['value']
except (TypeError, KeyError): except (TypeError, KeyError):
@@ -585,7 +590,7 @@ class SubmissionFormWidget(QWidget):
add_widget.addItems(cats) add_widget.addItems(cats)
add_widget.setToolTip("Enter submission category or select from list.") add_widget.setToolTip("Enter submission category or select from list.")
case _: case _:
if key in sub_obj.timestamps(): if key in sub_obj.timestamps:
add_widget = MyQDateEdit(calendarPopup=True, scrollWidget=parent) add_widget = MyQDateEdit(calendarPopup=True, scrollWidget=parent)
# NOTE: sets submitted date based on date found in excel sheet # NOTE: sets submitted date based on date found in excel sheet
try: try:
@@ -696,7 +701,7 @@ class SubmissionFormWidget(QWidget):
if not self.lot.isEnabled(): if not self.lot.isEnabled():
return None, report return None, report
lot = self.lot.currentText() lot = self.lot.currentText()
wanted_reagent, new = Reagent.query_or_create(lot=lot, role=self.reagent.role) wanted_reagent, new = Reagent.query_or_create(lot=lot, role=self.reagent.role, expiry=self.reagent.expiry)
# NOTE: if reagent doesn't exist in database, offer to add it (uses new_add_reagent) # NOTE: if reagent doesn't exist in database, offer to add it (uses new_add_reagent)
logger.debug(f"Wanted reagent: {wanted_reagent}, New: {new}") logger.debug(f"Wanted reagent: {wanted_reagent}, New: {new}")
# if wanted_reagent is None: # if wanted_reagent is None:
@@ -705,18 +710,13 @@ class SubmissionFormWidget(QWidget):
message=f"Couldn't find reagent type {self.reagent.role}: {lot} in the database.\n\nWould you like to add it?") message=f"Couldn't find reagent type {self.reagent.role}: {lot} in the database.\n\nWould you like to add it?")
if dlg.exec(): if dlg.exec():
# wanted_reagent = self.parent().parent().add_reagent(reagent_lot=lot,
# reagent_role=self.reagent.role,
# expiry=self.reagent.expiry,
# name=self.reagent.name,
# kit=self.extraction_kit
# )
wanted_reagent = self.parent().parent().new_add_reagent(instance=wanted_reagent) wanted_reagent = self.parent().parent().new_add_reagent(instance=wanted_reagent)
logger.debug(f"Reagent added!")
report.add_result(Result(owner=__name__, code=0, msg="New reagent created.", status="Information"))
return wanted_reagent, report return wanted_reagent, report
else: else:
# NOTE: In this case we will have an empty reagent and the submission will fail kit integrity check # NOTE: In this case we will have an empty reagent and the submission will fail kit integrity check
report.add_result(Result(msg="Failed integrity check", status="Critical"))
return None, report return None, report
else: else:
# NOTE: Since this now gets passed in directly from the parser -> pyd -> form and the parser gets the name # NOTE: Since this now gets passed in directly from the parser -> pyd -> form and the parser gets the name
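Editor's note: Reagent.query_or_create(lot=..., role=..., expiry=...) above follows the usual get-or-create shape. A generic sketch of that pattern, on the assumption that the real method is an SQLAlchemy lookup with a constructor fallback:

def query_or_create(session, model, **kwargs):
    """Return (instance, created): fetch a matching row or build a new, unsaved one.

    Generic sketch only; the real Reagent.query_or_create presumably also handles
    expiry parsing and role lookups.
    """
    instance = session.query(model).filter_by(**kwargs).first()
    if instance is not None:
        return instance, False
    return model(**kwargs), True

# Hypothetical usage: wanted_reagent, new = query_or_create(session, Reagent, lot=lot, role=role, expiry=expiry)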

View File

@@ -53,7 +53,6 @@ main_form_style = '''
QComboBox:!editable, QDateEdit { QComboBox:!editable, QDateEdit {
background-color:lightgray; background-color:lightgray;
} }
''' '''
page_size = 250 page_size = 250