Moments before disaster.

lwark
2024-05-01 08:08:34 -05:00
parent 5378c79933
commit 61c1a613e2
9 changed files with 605 additions and 480 deletions

View File

@@ -1,3 +1,7 @@
+## 202404.05
+- Addition of default query method using Kwargs.
 ## 202404.04
 - Storing of default values in db rather than hardcoded.
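A rough sketch of what the kwargs-based query is meant to enable, going by this commit alone (class and field names are taken from the diff below; the values are invented):

    # Hypothetical usage of the new keyword-driven query helpers.
    sub = BasicSubmission.query(rsl_plate_num="RSL-WW-20240105-1")  # a 'singles' key, so one object comes back
    subs = BasicSubmission.query(submission_type="Wastewater")      # other filters return a list
    sample = BasicSample.query(submitter_id="ABC-123")              # any mapped column can be passed as a keyword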

View File

@@ -1,5 +1,7 @@
+- [ ] Create a default info return function.
+- [ ] Parse comment from excel sheet.
 - [ ] Make reporting better.
-- [ ] Build master query method?
+- [x] Build master query method?
   - Obviously there will need to be extensions, but I feel the attr method I have in Submissions could work.
 - [x] Fix Artic RSLNamer
 - [x] Put "Not applicable" reagents in to_dict() method.

View File

@@ -1,18 +1,24 @@
 '''
 Contains all models for sqlalchemy
 '''
-import sys
+from __future__ import annotations
+import sys, logging
 from sqlalchemy.orm import DeclarativeMeta, declarative_base, Query, Session
 from sqlalchemy.ext.declarative import declared_attr
+from sqlalchemy.exc import ArgumentError
 from typing import Any, List
 from pathlib import Path
 # Load testing environment
 if 'pytest' in sys.modules:
     from pathlib import Path
     sys.path.append(Path(__file__).parents[4].absolute().joinpath("tests").__str__())
 Base: DeclarativeMeta = declarative_base()
+logger = logging.getLogger(f"submissions.{__name__}")
 class BaseClass(Base):
     """
@@ -75,7 +81,25 @@ class BaseClass(Base):
         return ctx.backup_path
     @classmethod
-    def execute_query(cls, query: Query, limit: int = 0) -> Any | List[Any]:
+    def get_default_info(cls, *args) -> dict | List[str]:
+        dicto = dict(singles=['id'])
+        output = {}
+        for k, v in dicto.items():
+            if len(args) > 0 and k not in args:
+                # logger.debug(f"Don't want {k}")
+                continue
+            else:
+                output[k] = v
+        if len(args) == 1:
+            return output[args[0]]
+        return output
+    @classmethod
+    def query(cls, **kwargs):
+        return cls.execute_query(**kwargs)
+    @classmethod
+    def execute_query(cls, query: Query = None, model=None, limit: int = 0, **kwargs) -> Any | List[Any]:
         """
         Execute sqlalchemy query.
@@ -86,6 +110,23 @@ class BaseClass(Base):
         Returns:
             Any | List[Any]: Single result if limit = 1 or List if other.
         """
+        if model is None:
+            model = cls
+        if query is None:
+            query: Query = cls.__database_session__.query(model)
+        # logger.debug(f"Grabbing singles using {model.get_default_info}")
+        singles = model.get_default_info('singles')
+        logger.debug(f"Querying: {model}, singles: {singles}")
+        for k, v in kwargs.items():
+            logger.debug(f"Using key: {k} with value: {v}")
+            # logger.debug(f"That key found attribute: {attr} with type: {attr}")
+            try:
+                attr = getattr(model, k)
+                query = query.filter(attr == v)
+            except (ArgumentError, AttributeError) as e:
+                logger.error(f"Attribute {k} available due to:\n\t{e}\nSkipping.")
+            if k in singles:
+                limit = 1
         with query.session.no_autoflush:
             match limit:
                 case 0:
@@ -95,6 +136,10 @@ class BaseClass(Base):
                 case _:
                     return query.limit(limit).all()
+    @classmethod
+    def default_info_return(cls, info, *args):
+        return info
     def save(self):
         """
         Add the object to the database and commit
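The intent of the two new classmethods, as far as these hunks show, is that get_default_info() declares which keyword filters should force a single-row result, while execute_query() applies whatever keyword arguments it receives as column filters. A minimal sketch of how a model might extend the defaults (the subclass and column name here are hypothetical, not from this commit):

    class ExampleModel(BaseClass):  # hypothetical subclass, for illustration only
        @classmethod
        def get_default_info(cls, *args):
            info = super().get_default_info(*args)
            # Declare that filtering on 'external_id' should return one object rather than a list.
            if isinstance(info, dict):
                info['singles'] = info.get('singles', []) + ['external_id']
            elif isinstance(info, list) and 'singles' in args:
                info = info + ['external_id']
            return info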

View File

@@ -947,7 +947,8 @@ class SubmissionReagentAssociation(BaseClass):
         match submission:
             case BasicSubmission() | str():
                 if isinstance(submission, str):
-                    submission = BasicSubmission.query(rsl_number=submission)
+                    # submission = BasicSubmission.query(rsl_number=submission)
+                    submission = BasicSubmission.query(rsl_plate_num=submission)
                 # logger.debug(f"Lookup SubmissionReagentAssociation by submission BasicSubmission {submission}")
                 query = query.filter(cls.submission==submission)
             case int():

View File

@@ -1,6 +1,6 @@
-'''
-Models for the main submission types.
-'''
+"""
+Models for the main submission and sample types.
+"""
 from __future__ import annotations
 from getpass import getuser
 import logging, uuid, tempfile, re, yaml, base64
@@ -23,13 +23,14 @@ from tools import check_not_nan, row_map, setup_lookup, jinja_template_loading,
 from datetime import datetime, date
 from typing import List, Any, Tuple
 from dateutil.parser import parse
-from dateutil.parser._parser import ParserError
+from dateutil.parser import ParserError
 from pathlib import Path
 from jinja2.exceptions import TemplateNotFound
 from jinja2 import Template
 logger = logging.getLogger(f"submissions.{__name__}")
 class BasicSubmission(BaseClass):
     """
     Concrete of basic submission which polymorphs into BacterialCulture and Wastewater
@@ -40,21 +41,27 @@ class BasicSubmission(BaseClass):
     submitter_plate_num = Column(String(127), unique=True) #: The number given to the submission by the submitting lab
     submitted_date = Column(TIMESTAMP) #: Date submission received
     submitting_lab = relationship("Organization", back_populates="submissions") #: client org
-    submitting_lab_id = Column(INTEGER, ForeignKey("_organization.id", ondelete="SET NULL", name="fk_BS_sublab_id")) #: client lab id from _organizations
+    submitting_lab_id = Column(INTEGER, ForeignKey("_organization.id", ondelete="SET NULL",
+                                                   name="fk_BS_sublab_id")) #: client lab id from _organizations
     sample_count = Column(INTEGER) #: Number of samples in the submission
     extraction_kit = relationship("KitType", back_populates="submissions") #: The extraction kit used
-    extraction_kit_id = Column(INTEGER, ForeignKey("_kittype.id", ondelete="SET NULL", name="fk_BS_extkit_id")) #: id of joined extraction kit
-    submission_type_name = Column(String, ForeignKey("_submissiontype.name", ondelete="SET NULL", name="fk_BS_subtype_name")) #: name of joined submission type
+    extraction_kit_id = Column(INTEGER, ForeignKey("_kittype.id", ondelete="SET NULL",
+                                                   name="fk_BS_extkit_id")) #: id of joined extraction kit
+    submission_type_name = Column(String, ForeignKey("_submissiontype.name", ondelete="SET NULL",
+                                                     name="fk_BS_subtype_name")) #: name of joined submission type
     technician = Column(String(64)) #: initials of processing tech(s)
     # Move this into custom types?
-    reagents_id = Column(String, ForeignKey("_reagent.id", ondelete="SET NULL", name="fk_BS_reagents_id")) #: id of used reagents
+    reagents_id = Column(String, ForeignKey("_reagent.id", ondelete="SET NULL",
+                                            name="fk_BS_reagents_id")) #: id of used reagents
     extraction_info = Column(JSON) #: unstructured output from the extraction table logger.
-    run_cost = Column(FLOAT(2)) #: total cost of running the plate. Set from constant and mutable kit costs at time of creation.
+    run_cost = Column(
+        FLOAT(2)) #: total cost of running the plate. Set from constant and mutable kit costs at time of creation.
     signed_by = Column(String(32)) #: user name of person who submitted the submission to the database.
     comment = Column(JSON) #: user notes
-    submission_category = Column(String(64)) #: ["Research", "Diagnostic", "Surveillance", "Validation"], else defaults to submission_type_name
-    cost_centre = Column(String(64)) #: Permanent storage of used cost centre in case organization field changed in the future.
+    submission_category = Column(
+        String(64)) #: ["Research", "Diagnostic", "Surveillance", "Validation"], else defaults to submission_type_name
+    cost_centre = Column(
+        String(64)) #: Permanent storage of used cost centre in case organization field changed in the future.
     submission_sample_associations = relationship(
         "SubmissionSampleAssociation",
@@ -62,7 +69,8 @@ class BasicSubmission(BaseClass):
         cascade="all, delete-orphan",
     ) #: Relation to SubmissionSampleAssociation
-    samples = association_proxy("submission_sample_associations", "sample") #: Association proxy to SubmissionSampleAssociation.samples
+    samples = association_proxy("submission_sample_associations",
+                                "sample") #: Association proxy to SubmissionSampleAssociation.samples
     submission_reagent_associations = relationship(
         "SubmissionReagentAssociation",
@@ -70,7 +78,8 @@ class BasicSubmission(BaseClass):
         cascade="all, delete-orphan",
     ) #: Relation to SubmissionReagentAssociation
-    reagents = association_proxy("submission_reagent_associations", "reagent") #: Association proxy to SubmissionReagentAssociation.reagent
+    reagents = association_proxy("submission_reagent_associations",
+                                 "reagent") #: Association proxy to SubmissionReagentAssociation.reagent
     submission_equipment_associations = relationship(
         "SubmissionEquipmentAssociation",
@@ -78,7 +87,8 @@ class BasicSubmission(BaseClass):
         cascade="all, delete-orphan"
     ) #: Relation to Equipment
-    equipment = association_proxy("submission_equipment_associations", "equipment") #: Association proxy to SubmissionEquipmentAssociation.equipment
+    equipment = association_proxy("submission_equipment_associations",
+                                  "equipment") #: Association proxy to SubmissionEquipmentAssociation.equipment
     # Allows for subclassing into ex. BacterialCulture, Wastewater, etc.
     __mapper_args__ = {
@@ -105,24 +115,22 @@ class BasicSubmission(BaseClass):
     @classmethod
     def get_default_info(cls, *args):
         # Create defaults for all submission_types
-        # print(args)
+        parent_defs = super().get_default_info()
         recover = ['filepath', 'samples', 'csv', 'comment', 'equipment']
         dicto = dict(
             details_ignore=['excluded', 'reagents', 'samples',
                             'extraction_info', 'comment', 'barcode',
                             'platemap', 'export_map', 'equipment'],
             form_recover=recover,
-            form_ignore = ['reagents', 'ctx', 'id', 'cost', 'extraction_info', 'signed_by'] + recover,
+            form_ignore=['reagents', 'ctx', 'id', 'cost', 'extraction_info', 'signed_by', 'comment'] + recover,
             parser_ignore=['samples', 'signed_by'] + cls.jsons(),
-            excel_ignore = []
+            excel_ignore=[],
         )
-        # Grab subtype specific info.
-        st = cls.get_submission_type()
-        if st is None:
-            logger.error("No default info for BasicSubmission.")
-            return dicto
-        else:
-            dicto['submission_type'] = st.name
+        # logger.debug(dicto['singles'])
+        """Singles tells the query which fields to set limit to 1"""
+        dicto['singles'] = parent_defs['singles']
+        # logger.debug(dicto['singles'])
+        """Grab subtype specific info."""
         output = {}
         for k, v in dicto.items():
             if len(args) > 0 and k not in args:
@@ -130,6 +138,12 @@ class BasicSubmission(BaseClass):
                 continue
             else:
                 output[k] = v
+        st = cls.get_submission_type()
+        if st is None:
+            logger.error("No default info for BasicSubmission.")
+            # return output
+        else:
+            output['submission_type'] = st.name
         for k, v in st.defaults.items():
             if len(args) > 0 and k not in args:
                 # logger.debug(f"Don't want {k}")
@@ -196,12 +210,15 @@ class BasicSubmission(BaseClass):
         if full_data:
             logger.debug(f"Attempting reagents.")
             try:
-                reagents = [item.to_sub_dict(extraction_kit=self.extraction_kit) for item in self.submission_reagent_associations]
+                reagents = [item.to_sub_dict(extraction_kit=self.extraction_kit) for item in
+                            self.submission_reagent_associations]
                 for k in self.extraction_kit.construct_xl_map_for_use(self.submission_type):
                     if k == 'info':
                         continue
                     if not any([item['type'] == k for item in reagents]):
-                        reagents.append(dict(type=k, name="Not Applicable", lot="NA", expiry=date(year=1970, month=1, day=1), missing=True))
+                        reagents.append(
+                            dict(type=k, name="Not Applicable", lot="NA", expiry=date(year=1970, month=1, day=1),
+                                 missing=True))
             except Exception as e:
                 logger.error(f"We got an error retrieving reagents: {e}")
                 reagents = None
@@ -260,7 +277,8 @@ class BasicSubmission(BaseClass):
         except Exception as e:
             logger.error(f"Column count error: {e}")
         # Get kit associated with this submission
-        assoc = [item for item in self.extraction_kit.kit_submissiontype_associations if item.submission_type == self.submission_type][0]
+        assoc = [item for item in self.extraction_kit.kit_submissiontype_associations if
+                 item.submission_type == self.submission_type][0]
         logger.debug(f"Came up with association: {assoc}")
         # If every individual cost is 0 this is probably an old plate.
         if all(item == 0.0 for item in [assoc.constant_cost, assoc.mutable_cost_column, assoc.mutable_cost_sample]):
@@ -270,7 +288,8 @@ class BasicSubmission(BaseClass):
             logger.error(f"Calculation error: {e}")
         else:
             try:
-                self.run_cost = assoc.constant_cost + (assoc.mutable_cost_column * cols_count_96) + (assoc.mutable_cost_sample * int(self.sample_count))
+                self.run_cost = assoc.constant_cost + (assoc.mutable_cost_column * cols_count_96) + (
+                        assoc.mutable_cost_sample * int(self.sample_count))
             except Exception as e:
                 logger.error(f"Calculation error: {e}")
         self.run_cost = round(self.run_cost, 2)
@@ -350,7 +369,8 @@ class BasicSubmission(BaseClass):
         logger.debug(f"Got {len(subs)} submissions.")
         df = pd.DataFrame.from_records(subs)
         # Exclude sub information
-        for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents', 'equipment', 'gel_info', 'gel_image', 'dna_core_submission_number', 'gel_controls']:
+        for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents',
+                     'equipment', 'gel_info', 'gel_image', 'dna_core_submission_number', 'gel_controls']:
             try:
                 df = df.drop(item, axis=1)
             except:
@@ -384,7 +404,8 @@ class BasicSubmission(BaseClass):
                 return
             case "reagents":
                 logger.debug(f"Reagents coming into SQL: {value}")
-                field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for reagent in value]
+                field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for
+                               reagent in value]
                 logger.debug(f"Reagents coming out of SQL: {field_value}")
             case "submission_type":
                 field_value = SubmissionType.query(name=value)
@@ -474,7 +495,8 @@ class BasicSubmission(BaseClass):
                 new_dict[key] = [PydReagent(**reagent) for reagent in value]
             case "samples":
-                new_dict[key] = [PydSample(**{k.lower().replace(" ", "_"):v for k,v in sample.items()}) for sample in dicto['samples']]
+                new_dict[key] = [PydSample(**{k.lower().replace(" ", "_"): v for k, v in sample.items()}) for sample
+                                 in dicto['samples']]
             case "equipment":
                 try:
                     new_dict[key] = [PydEquipment(**equipment) for equipment in dicto['equipment']]
@@ -519,7 +541,8 @@ class BasicSubmission(BaseClass):
         return regex
     @classmethod
-    def find_polymorphic_subclass(cls, attrs: dict|None = None, polymorphic_identity:str|SubmissionType|None = None):
+    def find_polymorphic_subclass(cls, polymorphic_identity: str | SubmissionType | None = None,
+                                  attrs: dict | None = None):
         """
         Find subclass based on polymorphic identity or relevant attributes.
@@ -549,12 +572,14 @@ class BasicSubmission(BaseClass):
                 pass
         if attrs is None or len(attrs) == 0:
             return model
-        if any([not hasattr(cls, attr) for attr in attrs]):
+        if any([not hasattr(cls, attr) for attr in attrs.keys()]):
             # looks for first model that has all included kwargs
             try:
-                model = [subclass for subclass in cls.__subclasses__() if all([hasattr(subclass, attr) for attr in attrs])][0]
+                model = [subclass for subclass in cls.__subclasses__() if
+                         all([hasattr(subclass, attr) for attr in attrs.keys()])][0]
             except IndexError as e:
-                raise AttributeError(f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs)}")
+                raise AttributeError(
+                    f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs.keys())}")
         logger.info(f"Recruiting model: {model}")
         return model
@@ -605,7 +630,8 @@ class BasicSubmission(BaseClass):
         return input_dict
     @classmethod
-    def finalize_parse(cls, input_dict:dict, xl:pd.ExcelFile|None=None, info_map:dict|None=None, plate_map:dict|None=None) -> dict:
+    def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None,
+                       plate_map: dict | None = None) -> dict:
         """
         Performs any final custom parsing of the excel file.
@@ -668,7 +694,8 @@ class BasicSubmission(BaseClass):
         outstr = re.sub(rf"RSL-?", rf"RSL-{data['abbreviation']}-", outstr, flags=re.IGNORECASE)
         try:
             outstr = re.sub(r"(\d{4})-(\d{2})-(\d{2})", r"\1\2\3", outstr)
-            outstr = re.sub(rf"{data['abbreviation']}(\d{6})", rf"{data['abbreviation']}-\1", outstr, flags=re.IGNORECASE).upper()
+            outstr = re.sub(rf"{data['abbreviation']}(\d{6})", rf"{data['abbreviation']}-\1", outstr,
+                            flags=re.IGNORECASE).upper()
         except (AttributeError, TypeError) as e:
             logger.error(f"Error making outstr: {e}, sending to RSLNamer to make new plate name.")
             outstr = RSLNamer.construct_new_plate_name(data=data)
@@ -775,7 +802,7 @@ class BasicSubmission(BaseClass):
     def query(cls,
               submission_type: str | SubmissionType | None = None,
               id: int | str | None = None,
-              rsl_number:str|None=None,
+              rsl_plate_num: str | None = None,
               start_date: date | str | int | None = None,
               end_date: date | str | int | None = None,
               reagent: Reagent | str | None = None,
@@ -784,12 +811,12 @@ class BasicSubmission(BaseClass):
               **kwargs
               ) -> BasicSubmission | List[BasicSubmission]:
         """
-        Lookup submissions based on a number of parameters.
+        Lookup submissions based on a number of parameters. Overrides parent.
         Args:
             submission_type (str | models.SubmissionType | None, optional): Submission type of interest. Defaults to None.
             id (int | str | None, optional): Submission id in the database (limits results to 1). Defaults to None.
-            rsl_number (str | None, optional): Submission name in the database (limits results to 1). Defaults to None.
+            rsl_plate_num (str | None, optional): Submission name in the database (limits results to 1). Defaults to None.
             start_date (date | str | int | None, optional): Beginning date to search by. Defaults to None.
             end_date (date | str | int | None, optional): Ending date to search by. Defaults to None.
             reagent (models.Reagent | str | None, optional): A reagent used in the submission. Defaults to None.
@@ -815,13 +842,13 @@ class BasicSubmission(BaseClass):
         else:
             model = cls
         query: Query = cls.__database_session__.query(model)
-        if start_date != None and end_date == None:
+        if start_date is not None and end_date is None:
             logger.warning(f"Start date with no end date, using today.")
             end_date = date.today()
-        if end_date != None and start_date == None:
+        if end_date is not None and start_date is None:
             logger.warning(f"End date with no start date, using Jan 1, 2023")
             start_date = date(2023, 1, 1)
-        if start_date != None:
+        if start_date is not None:
             logger.debug(f"Querying with start date: {start_date} and end date: {end_date}")
             match start_date:
                 case date():
@@ -829,7 +856,8 @@ class BasicSubmission(BaseClass):
                     start_date = start_date.strftime("%Y-%m-%d")
                 case int():
                     # logger.debug(f"Lookup BasicSubmission by ordinal start_date {start_date}")
-                    start_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d")
+                    start_date = datetime.fromordinal(
+                        datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d")
                 case _:
                     # logger.debug(f"Lookup BasicSubmission by parsed str start_date {start_date}")
                     start_date = parse(start_date).strftime("%Y-%m-%d")
@@ -839,7 +867,8 @@ class BasicSubmission(BaseClass):
                     end_date = end_date.strftime("%Y-%m-%d")
                 case int():
                     # logger.debug(f"Lookup BasicSubmission by ordinal end_date {end_date}")
-                    end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date().strftime("%Y-%m-%d")
+                    end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date().strftime(
+                        "%Y-%m-%d")
                 case _:
                     # logger.debug(f"Lookup BasicSubmission by parsed str end_date {end_date}")
                     end_date = parse(end_date).strftime("%Y-%m-%d")
@@ -855,16 +884,18 @@ class BasicSubmission(BaseClass):
         match reagent:
             case str():
                 # logger.debug(f"Looking up BasicSubmission with reagent: {reagent}")
-                query = query.join(model.submission_reagent_associations).filter(SubmissionSampleAssociation.reagent.lot==reagent)
+                query = query.join(model.submission_reagent_associations).filter(
+                    SubmissionSampleAssociation.reagent.lot == reagent)
             case Reagent():
                 # logger.debug(f"Looking up BasicSubmission with reagent: {reagent}")
-                query = query.join(model.submission_reagent_associations).join(SubmissionSampleAssociation.reagent).filter(Reagent.lot==reagent)
+                query = query.join(model.submission_reagent_associations).join(
+                    SubmissionSampleAssociation.reagent).filter(Reagent.lot == reagent)
             case _:
                 pass
         # by rsl number (returns only a single value)
-        match rsl_number:
+        match rsl_plate_num:
             case str():
-                query = query.filter(model.rsl_plate_num==rsl_number)
+                query = query.filter(model.rsl_plate_num == rsl_plate_num)
                 # logger.debug(f"At this point the query gets: {query.all()}")
                 limit = 1
             case _:
@@ -881,16 +912,20 @@ class BasicSubmission(BaseClass):
                 limit = 1
             case _:
                 pass
-        for k, v in kwargs.items():
-            logger.debug(f"Looking up attribute: {k}")
-            attr = getattr(model, k)
-            logger.debug(f"Got attr: {attr}")
-            query = query.filter(attr==v)
+        # for k, v in kwargs.items():
+        #     logger.debug(f"Looking up attribute: {k}")
+        #     attr = getattr(model, k)
+        #     logger.debug(f"Got attr: {attr}")
+        #     query = query.filter(attr==v)
         # if len(kwargs) > 0:
         #     limit = 1
+        # query = cls.query_by_keywords(query=query, model=model, **kwargs)
+        # if any(x in kwargs.keys() for x in cls.get_default_info('singles')):
+        #     logger.debug(f"There's a singled out item in kwargs")
+        #     limit = 1
         if chronologic:
             query.order_by(cls.submitted_date)
-        return cls.execute_query(query=query, limit=limit)
+        return cls.execute_query(query=query, model=model, limit=limit, **kwargs)
     @classmethod
     def query_or_create(cls, submission_type: str | SubmissionType | None = None, **kwargs) -> BasicSubmission:
@@ -914,7 +949,8 @@ class BasicSubmission(BaseClass):
             raise ValueError("Need to narrow down query or the first available instance will be returned.")
         for key in kwargs.keys():
             if key in disallowed:
-                raise ValueError(f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects. Use .query() instead.")
+                raise ValueError(
+                    f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects. Use .query() instead.")
         instance = cls.query(submission_type=submission_type, limit=1, **kwargs)
         # logger.debug(f"Retrieved instance: {instance}")
         if instance == None:
@@ -1059,6 +1095,7 @@ class BasicSubmission(BaseClass):
         wb = pyd.autofill_equipment(wb)
         wb.save(filename=fname.with_suffix(".xlsx"))
 # Below are the custom submission types
+
 class BacterialCulture(BasicSubmission):
@@ -1066,7 +1103,8 @@ class BacterialCulture(BasicSubmission):
     derivative submission type from BasicSubmission
     """
     id = Column(INTEGER, ForeignKey('_basicsubmission.id'), primary_key=True)
-    controls = relationship("Control", back_populates="submission", uselist=True) #: A control sample added to submission
+    controls = relationship("Control", back_populates="submission",
+                            uselist=True) #: A control sample added to submission
     __mapper_args__ = dict(polymorphic_identity="Bacterial Culture",
                            polymorphic_load="inline",
                            inherit_condition=(id == BasicSubmission.id))
@@ -1085,10 +1123,6 @@ class BacterialCulture(BasicSubmission):
         output['controls'] = [item.to_sub_dict() for item in self.controls]
         return output
-    # @classmethod
-    # def get_default_info(cls) -> dict:
-    #     return dict(abbreviation="BC", submission_type="Bacterial Culture")
     @classmethod
     def custom_platemap(cls, xl: pd.ExcelFile, plate_map: pd.DataFrame) -> pd.DataFrame:
         """
@@ -1152,7 +1186,8 @@ class BacterialCulture(BasicSubmission):
         return template
     @classmethod
-    def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None, plate_map: dict | None = None) -> dict:
+    def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None,
+                       plate_map: dict | None = None) -> dict:
         """
         Extends parent. Currently finds control sample and adds to reagents.
@@ -1176,7 +1211,8 @@ class BacterialCulture(BasicSubmission):
                 logger.debug(f"Control match found: {sample.submitter_id}")
                 new_lot = matched.group()
                 try:
-                    pos_control_reg = [reg for reg in input_dict['reagents'] if reg.type=="Bacterial-Positive Control"][0]
+                    pos_control_reg = \
+                        [reg for reg in input_dict['reagents'] if reg.type == "Bacterial-Positive Control"][0]
                 except IndexError:
                     logger.error(f"No positive control reagent listed")
                     return input_dict
@@ -1203,6 +1239,7 @@ class BacterialCulture(BasicSubmission):
         row = idx.index.to_list()[0]
         return row + 1
+
 class Wastewater(BasicSubmission):
     """
     derivative submission type from BasicSubmission
@@ -1263,7 +1300,10 @@ class Wastewater(BasicSubmission):
""" """
samples = super().parse_pcr(xl=xl, rsl_number=rsl_number) samples = super().parse_pcr(xl=xl, rsl_number=rsl_number)
df = xl.parse(sheet_name="Results", dtype=object).fillna("") df = xl.parse(sheet_name="Results", dtype=object).fillna("")
column_names = ["Well", "Well Position", "Omit","Sample","Target","Task"," Reporter","Quencher","Amp Status","Amp Score","Curve Quality","Result Quality Issues","Cq","Cq Confidence","Cq Mean","Cq SD","Auto Threshold","Threshold", "Auto Baseline", "Baseline Start", "Baseline End"] column_names = ["Well", "Well Position", "Omit", "Sample", "Target", "Task", " Reporter", "Quencher",
"Amp Status", "Amp Score", "Curve Quality", "Result Quality Issues", "Cq", "Cq Confidence",
"Cq Mean", "Cq SD", "Auto Threshold", "Threshold", "Auto Baseline", "Baseline Start",
"Baseline End"]
samples_df = df.iloc[23:][0:] samples_df = df.iloc[23:][0:]
logger.debug(f"Dataframe of PCR results:\n\t{samples_df}") logger.debug(f"Dataframe of PCR results:\n\t{samples_df}")
samples_df.columns = column_names samples_df.columns = column_names
@@ -1364,6 +1404,7 @@ class Wastewater(BasicSubmission):
             self.update_subsampassoc(sample=sample, input_dict=sample_dict)
         # self.report.add_result(Result(msg=f"We added PCR info to {sub.rsl_plate_num}.", status='Information'))
+
 class WastewaterArtic(BasicSubmission):
     """
     derivative submission type for artic wastewater
@@ -1414,9 +1455,12 @@ class WastewaterArtic(BasicSubmission):
         ws = workbook['Egel results']
         data = [ws.cell(row=ii, column=jj) for jj in range(15, 27) for ii in range(10, 18)]
         data = [cell for cell in data if cell.value is not None and "NTC" in cell.value]
-        input_dict['gel_controls'] = [dict(sample_id=cell.value, location=f"{row_map[cell.row-9]}{str(cell.column-14).zfill(2)}") for cell in data]
+        input_dict['gel_controls'] = [
+            dict(sample_id=cell.value, location=f"{row_map[cell.row - 9]}{str(cell.column - 14).zfill(2)}") for cell in
+            data]
         ws = workbook['First Strand List']
-        data = [dict(plate=ws.cell(row=ii, column=3).value, starting_sample=ws.cell(row=ii, column=4).value) for ii in range(8,11)]
+        data = [dict(plate=ws.cell(row=ii, column=3).value, starting_sample=ws.cell(row=ii, column=4).value) for ii in
+                range(8, 11)]
         input_dict['source_plates'] = data
         return input_dict
@@ -1523,7 +1567,8 @@ class WastewaterArtic(BasicSubmission):
         return "(?P<Wastewater_Artic>(\\d{4}-\\d{2}-\\d{2}(?:-|_)(?:\\d_)?artic)|(RSL(?:-|_)?AR(?:-|_)?20\\d{2}-?\\d{2}-?\\d{2}(?:(_|-)\\d?(\\D|$)R?\\d?)?))"
     @classmethod
-    def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None, plate_map: dict | None = None) -> dict:
+    def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None,
+                       plate_map: dict | None = None) -> dict:
         """
         Performs any final custom parsing of the excel file. Extends parent
@@ -1618,7 +1663,8 @@ class WastewaterArtic(BasicSubmission):
             Tuple[dict, Template]: (Updated dictionary, Template to be rendered)
         """
         base_dict, template = super().get_details_template(base_dict=base_dict)
-        base_dict['excluded'] += ['gel_info', 'gel_image', 'headers', "dna_core_submission_number", "source_plates", "gel_controls"]
+        base_dict['excluded'] += ['gel_info', 'gel_image', 'headers', "dna_core_submission_number", "source_plates",
+                                  "gel_controls"]
         base_dict['DNA Core ID'] = base_dict['dna_core_submission_number']
         check = 'gel_info' in base_dict.keys() and base_dict['gel_info'] != None
         if check:
@@ -1697,6 +1743,7 @@ class WastewaterArtic(BasicSubmission):
             zipf.write(img_path, self.gel_image)
         self.save()
 # Sample Classes
+
 class BasicSample(BaseClass):
@@ -1765,7 +1812,8 @@ class BasicSample(BaseClass):
         sample['Submitter ID'] = self.submitter_id
         sample['Sample Type'] = self.sample_type
         if full_data:
-            sample['submissions'] = sorted([item.to_sub_dict() for item in self.sample_submission_associations], key=itemgetter('submitted_date'))
+            sample['submissions'] = sorted([item.to_sub_dict() for item in self.sample_submission_associations],
+                                           key=itemgetter('submitted_date'))
         # logger.debug(f"Done converting {self} after {time()-start}")
         return sample
@@ -1783,44 +1831,13 @@ class BasicSample(BaseClass):
             logger.error(f"Attribute {name} not found")
     @classmethod
-    def find_subclasses(cls, attrs:dict|None=None, sample_type:str|None=None) -> BasicSample:
-        """
-        Retrieves subclass of BasicSample based on type or possessed attributes.
-        Args:
-            attrs (dict | None, optional): attributes for query. Defaults to None.
-            sample_type (str | None, optional): sample type by name. Defaults to None.
-        Raises:
-            AttributeError: Raised if class containing all given attributes cannot be found.
-        Returns:
-            BasicSample: sample type object of interest
-        """
-        if sample_type != None:
-            return cls.find_polymorphic_subclass(polymorphic_identity=sample_type)
-        if len(attrs) == 0 or attrs == None:
-            logger.warning(f"No attr, returning {cls}")
-            return cls
-        if any([not hasattr(cls, attr) for attr in attrs]):
-            logger.debug(f"{cls} is missing attrs. searching for better match.")
-            # looks for first model that has all included kwargs
-            try:
-                model = [subclass for subclass in cls.__subclasses__() if all([hasattr(subclass, attr) for attr in attrs])][0]
-            except IndexError as e:
-                raise AttributeError(f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs)}")
-        else:
-            # logger.debug(f"{cls} has all necessary attributes, returning")
-            return cls
-        # logger.debug(f"Using model: {model}")
-        return model
-    @classmethod
-    def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None) -> BasicSample:
+    def find_polymorphic_subclass(cls, polymorphic_identity: str | None = None,
+                                  attrs: dict | None = None) -> BasicSample:
         """
         Retrieves subclasses of BasicSample based on type name.
         Args:
+            attrs (dict | None, optional): name: value of attributes in the wanted subclass
             polymorphic_identity (str | None, optional): Name of subclass fed to polymorphic identity. Defaults to None.
         Returns:
@@ -1828,14 +1845,27 @@ class BasicSample(BaseClass):
""" """
if isinstance(polymorphic_identity, dict): if isinstance(polymorphic_identity, dict):
polymorphic_identity = polymorphic_identity['value'] polymorphic_identity = polymorphic_identity['value']
if polymorphic_identity == None: if polymorphic_identity is not None:
return cls
else:
try: try:
return [item for item in cls.__subclasses__() if item.__mapper_args__['polymorphic_identity']==polymorphic_identity][0] return [item for item in cls.__subclasses__() if
item.__mapper_args__['polymorphic_identity'] == polymorphic_identity][0]
except Exception as e: except Exception as e:
logger.error(f"Could not get polymorph {polymorphic_identity} of {cls} due to {e}") logger.error(f"Could not get polymorph {polymorphic_identity} of {cls} due to {e}")
return cls model = cls
else:
model = cls
if attrs is None or len(attrs) == 0:
return model
if any([not hasattr(cls, attr) for attr in attrs.keys()]):
# looks for first model that has all included kwargs
try:
model = [subclass for subclass in cls.__subclasses__() if
all([hasattr(subclass, attr) for attr in attrs.keys()])][0]
except IndexError as e:
raise AttributeError(
f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs.keys())}")
logger.info(f"Recruiting model: {model}")
return model
@classmethod @classmethod
def parse_sample(cls, input_dict: dict) -> dict: def parse_sample(cls, input_dict: dict) -> dict:
@@ -1891,10 +1921,11 @@ class BasicSample(BaseClass):
         Returns:
             models.BasicSample|List[models.BasicSample]: Sample(s) of interest.
         """
-        if sample_type == None:
-            model = cls.find_subclasses(attrs=kwargs)
+        if sample_type is None:
+            # model = cls.find_subclasses(attrs=kwargs)
+            model = cls.find_polymorphic_subclass(attrs=kwargs)
         else:
-            model = cls.find_subclasses(sample_type=sample_type)
+            model = cls.find_polymorphic_subclass(polymorphic_identity=sample_type)
         logger.debug(f"Length of kwargs: {len(kwargs)}")
         # model = models.BasicSample.find_subclasses(ctx=ctx, attrs=kwargs)
         # query: Query = setup_lookup(ctx=ctx, locals=locals()).query(model)
@@ -1906,19 +1937,20 @@ class BasicSample(BaseClass):
                 limit = 1
             case _:
                 pass
-        match sample_type:
-            case str():
-                logger.warning(f"Looking up samples with sample_type is disabled.")
-                # query = query.filter(models.BasicSample.sample_type==sample_type)
-            case _:
-                pass
-        for k, v in kwargs.items():
-            attr = getattr(model, k)
-            # logger.debug(f"Got attr: {attr}")
-            query = query.filter(attr==v)
-        if len(kwargs) > 0:
-            limit = 1
-        return cls.execute_query(query=query, limit=limit)
+        # match sample_type:
+        #     case str():
+        #         logger.warning(f"Looking up samples with sample_type is disabled.")
+        #         # query = query.filter(models.BasicSample.sample_type==sample_type)
+        #     case _:
+        #         pass
+        # for k, v in kwargs.items():
+        #     attr = getattr(model, k)
+        #     # logger.debug(f"Got attr: {attr}")
+        #     query = query.filter(attr==v)
+        # if len(kwargs) > 0:
+        #     limit = 1
+        return cls.execute_query(query=query, model=model, limit=limit, **kwargs)
+        # return cls.execute_query(query=query, limit=limit)
     @classmethod
     def query_or_create(cls, sample_type: str | None = None, **kwargs) -> BasicSample:
@@ -1940,11 +1972,12 @@ class BasicSample(BaseClass):
             raise ValueError("Need to narrow down query or the first available instance will be returned.")
         for key in kwargs.keys():
             if key in disallowed:
-                raise ValueError(f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects.")
+                raise ValueError(
+                    f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects.")
         instance = cls.query(sample_type=sample_type, limit=1, **kwargs)
         logger.debug(f"Retrieved instance: {instance}")
         if instance == None:
-            used_class = cls.find_subclasses(attrs=kwargs, sample_type=sample_type)
+            used_class = cls.find_polymorphic_subclass(attrs=kwargs, polymorphic_identity=sample_type)
             instance = used_class(**kwargs)
             instance.sample_type = sample_type
             logger.debug(f"Creating instance: {instance}")
@@ -1953,6 +1986,7 @@ class BasicSample(BaseClass):
     def delete(self):
         raise AttributeError(f"Delete not implemented for {self.__class__}")
 #Below are the custom sample types
+
 class WastewaterSample(BasicSample):
@@ -1971,6 +2005,28 @@ class WastewaterSample(BasicSample):
                            polymorphic_load="inline",
                            inherit_condition=(id == BasicSample.id))
+    @classmethod
+    def get_default_info(cls, *args):
+        dicto = super().get_default_info(*args)
+        match dicto:
+            case dict():
+                dicto['singles'] += ['ww_processing_num']
+                output = {}
+                for k, v in dicto.items():
+                    if len(args) > 0 and k not in args:
+                        # logger.debug(f"Don't want {k}")
+                        continue
+                    else:
+                        output[k] = v
+                if len(args) == 1:
+                    return output[args[0]]
+            case list():
+                if "singles" in args:
+                    dicto += ['ww_processing_num']
+                return dicto
+            case _:
+                pass
     def to_sub_dict(self, full_data: bool = False) -> dict:
         """
         gui friendly dictionary, extends parent method.
@@ -2024,7 +2080,8 @@ class WastewaterSample(BasicSample):
             plates = [item['plate'] for item in current_artic_submission.source_plates]
         except TypeError as e:
             logger.error(f"source_plates must not be present")
-            plates = [item.rsl_plate_num for item in self.submissions[:self.submissions.index(current_artic_submission)]]
+            plates = [item.rsl_plate_num for item in
+                      self.submissions[:self.submissions.index(current_artic_submission)]]
         subs = [sub for sub in self.submissions if sub.rsl_plate_num in plates]
         logger.debug(f"Submissions: {subs}")
         try:
@@ -2032,6 +2089,7 @@ class WastewaterSample(BasicSample):
         except IndexError:
             return None
+
 class BacterialCultureSample(BasicSample):
     """
     base of bacterial culture sample
@@ -2062,6 +2120,7 @@ class BacterialCultureSample(BasicSample):
         # logger.debug(f"Done converting to {self} to dict after {time()-start}")
         return sample
 # Submission to Sample Associations
+
 class SubmissionSampleAssociation(BaseClass):
@@ -2077,7 +2136,8 @@ class SubmissionSampleAssociation(BaseClass):
     column = Column(INTEGER, primary_key=True) #: column on the 96 well plate
     # reference to the Submission object
-    submission = relationship(BasicSubmission, back_populates="submission_sample_associations") #: associated submission
+    submission = relationship(BasicSubmission,
+                              back_populates="submission_sample_associations") #: associated submission
     # reference to the Sample object
     sample = relationship(BasicSample, back_populates="sample_submission_associations") #: associated sample
@@ -2092,7 +2152,8 @@ class SubmissionSampleAssociation(BaseClass):
"with_polymorphic": "*", "with_polymorphic": "*",
} }
def __init__(self, submission:BasicSubmission=None, sample:BasicSample=None, row:int=1, column:int=1, id:int|None=None): def __init__(self, submission: BasicSubmission = None, sample: BasicSample = None, row: int = 1, column: int = 1,
id: int | None = None):
self.submission = submission self.submission = submission
self.sample = sample self.sample = sample
self.row = row self.row = row
@@ -2185,7 +2246,8 @@ class SubmissionSampleAssociation(BaseClass):
             output = cls
         else:
             try:
-                output = [item for item in cls.__subclasses__() if item.__mapper_args__['polymorphic_identity']==polymorphic_identity][0]
+                output = [item for item in cls.__subclasses__() if
+                          item.__mapper_args__['polymorphic_identity'] == polymorphic_identity][0]
             except Exception as e:
                 logger.error(f"Could not get polymorph {polymorphic_identity} of {cls} due to {e}")
                 output = cls
@@ -2203,6 +2265,7 @@ class SubmissionSampleAssociation(BaseClass):
               limit: int = 0,
               chronologic: bool = False,
               reverse: bool = False,
+              **kwargs
               ) -> SubmissionSampleAssociation | List[SubmissionSampleAssociation]:
         """
         Lookup junction of Submission and Sample in the database
@@ -2244,7 +2307,8 @@ class SubmissionSampleAssociation(BaseClass):
         match exclude_submission_type:
             case str():
                 # logger.debug(f"filter SampleSubmissionAssociation to exclude submission type {exclude_submission_type}")
-                query = query.join(BasicSubmission).filter(BasicSubmission.submission_type_name != exclude_submission_type)
+                query = query.join(BasicSubmission).filter(
+                    BasicSubmission.submission_type_name != exclude_submission_type)
             case _:
                 pass
         # logger.debug(f"Query count: {query.count()}")
@@ -2255,14 +2319,14 @@ class SubmissionSampleAssociation(BaseClass):
             query = query.order_by(BasicSubmission.submitted_date.desc())
         else:
             query = query.order_by(BasicSubmission.submitted_date)
-        return cls.execute_query(query=query, limit=limit)
+        return cls.execute_query(query=query, limit=limit, **kwargs)
     @classmethod
     def query_or_create(cls,
                         association_type: str = "Basic Association",
                         submission: BasicSubmission | str | None = None,
                         sample: BasicSample | str | None = None,
-                        id:int|None=None,
+                        # id:int|None=None,
                         **kwargs) -> SubmissionSampleAssociation:
         """
         Queries for an association, if none exists creates a new one.
@@ -2281,7 +2345,7 @@ class SubmissionSampleAssociation(BaseClass):
             case BasicSubmission():
                 pass
             case str():
-                submission = BasicSubmission.query(rsl_number=submission)
+                submission = BasicSubmission.query(rsl_plate_num=submission)
             case _:
                 raise ValueError()
         match sample:
@@ -2305,14 +2369,15 @@ class SubmissionSampleAssociation(BaseClass):
             instance = None
         if instance == None:
             used_cls = cls.find_polymorphic_subclass(polymorphic_identity=association_type)
-            instance = used_cls(submission=submission, sample=sample, id=id, **kwargs)
+            # instance = used_cls(submission=submission, sample=sample, id=id, **kwargs)
+            instance = used_cls(submission=submission, sample=sample, **kwargs)
         return instance
     def delete(self):
         raise AttributeError(f"Delete not implemented for {self.__class__}")
+
 class WastewaterAssociation(SubmissionSampleAssociation):
     id = Column(INTEGER, ForeignKey("_submissionsampleassociation.id"), primary_key=True)
     ct_n1 = Column(FLOAT(2)) #: AKA ct for N1
     ct_n2 = Column(FLOAT(2)) #: AKA ct for N2
@@ -2349,7 +2414,8 @@ class WastewaterAssociation(SubmissionSampleAssociation):
""" """
sample = super().to_hitpick() sample = super().to_hitpick()
try: try:
sample['tooltip'] += f"<br>- ct N1: {'{:.2f}'.format(self.ct_n1)} ({self.n1_status})<br>- ct N2: {'{:.2f}'.format(self.ct_n2)} ({self.n2_status})" sample[
'tooltip'] += f"<br>- ct N1: {'{:.2f}'.format(self.ct_n1)} ({self.n1_status})<br>- ct N2: {'{:.2f}'.format(self.ct_n2)} ({self.n2_status})"
except (TypeError, AttributeError) as e: except (TypeError, AttributeError) as e:
logger.error(f"Couldn't set tooltip for {self.sample.rsl_number}. Looks like there isn't PCR data.") logger.error(f"Couldn't set tooltip for {self.sample.rsl_number}. Looks like there isn't PCR data.")
return sample return sample
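Taken together, the overridden query() methods in this file now hand any unhandled keyword arguments to BaseClass.execute_query(), which turns them into column filters. A sketch of the intended call patterns after this change (argument names come from the diff; the values are made up):

    recent = BasicSubmission.query(start_date="2024-04-01", end_date="2024-04-30", chronologic=True)
    plate = BasicSubmission.query(rsl_plate_num="RSL-WW-20240105-1")  # renamed from rsl_number; limited to one result
    ww = WastewaterSample.query(ww_processing_num="WW-0042")          # falls through to execute_query(); a 'singles' key on the subclass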

View File

@@ -100,7 +100,7 @@ class SheetParser(object):
         Enforce that the parser has an extraction kit
         """
         from frontend.widgets.pop_ups import ObjectSelector
-        if not check_not_nan(self.sub['extraction_kit']['value']):
+        if 'extraction_kit' not in self.sub.keys() or not check_not_nan(self.sub['extraction_kit']['value']):
             dlg = ObjectSelector(title="Kit Needed", message="At minimum a kit is needed. Please select one.", obj_type=KitType)
             if dlg.exec():
                 self.sub['extraction_kit'] = dict(value=dlg.parse_form(), missing=True)
@@ -192,13 +192,18 @@ class InfoParser(object):
             for k, v in self.map.items():
                 # exclude from generic parsing
                 if k in exclude_from_generic:
+                    logger.warning(f"Key {k} is excluded due to parser_ignore")
                     continue
                 # If the value is hardcoded put it in the dictionary directly.
                 if isinstance(v, str):
                     dicto[k] = dict(value=v, missing=False)
                     continue
                 logger.debug(f"Looking for {k} in self.map")
-                if sheet in self.map[k]['sheets']:
+                try:
+                    check = sheet in self.map[k]['sheets']
+                except TypeError:
+                    continue
+                if check:
                     relevant[k] = v
             logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
             if relevant == {}:
@@ -592,7 +597,7 @@ class PCRParser(object):
         self.plate_num = namer.parsed_name
         self.submission_type = namer.submission_type
         logger.debug(f"Set plate number to {self.plate_num} and type to {self.submission_type}")
-        parser = BasicSubmission.find_polymorphic_subclass(self.submission_type)
+        parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
         self.samples = parser.parse_pcr(xl=self.xl, rsl_number=self.plate_num)
     def parse_general(self, sheet_name:str):

View File

@@ -576,6 +576,7 @@ class PydSubmission(BaseModel, extra='allow'):
                     association.save()
                     logger.debug(f"Equipment association SQL object to be added to submission: {association.__dict__}")
                     instance.submission_equipment_associations.append(association)
+            # TODO: case item if item in instance.jsons()
             case _:
                 try:
                     instance.set_attribute(key=key, value=value)

View File

@@ -82,7 +82,8 @@ class SubmissionDetails(QDialog):
""" """
logger.debug(f"Details for: {submission}") logger.debug(f"Details for: {submission}")
if isinstance(submission, str): if isinstance(submission, str):
submission = BasicSubmission.query(rsl_number=submission) # submission = BasicSubmission.query(rsl_number=submission)
submission = BasicSubmission.query(rsl_plate_num=submission)
self.base_dict = submission.to_dict(full_data=True) self.base_dict = submission.to_dict(full_data=True)
logger.debug(f"Submission details data:\n{pformat({k:v for k,v in self.base_dict.items() if k != 'samples'})}") logger.debug(f"Submission details data:\n{pformat({k:v for k,v in self.base_dict.items() if k != 'samples'})}")
# don't want id # don't want id
@@ -103,7 +104,8 @@ class SubmissionDetails(QDialog):
     def sign_off(self, submission:str|BasicSubmission):
         logger.debug(f"Signing off on {submission} - ({getuser()})")
         if isinstance(submission, str):
-            submission = BasicSubmission.query(rsl_number=submission)
+            # submission = BasicSubmission.query(rsl_number=submission)
+            submission = BasicSubmission.query(rsl_plate_number=submission)
         submission.signed_by = getuser()
         submission.save()
         self.submission_details(submission=self.rsl_plate_num)

View File

@@ -166,9 +166,8 @@ class SubmissionsSheet(QTableView):
             for ii in range(6, len(run)):
                 new_run[f"column{str(ii-5)}_vol"] = run[ii]
             # Lookup imported submissions
-            # sub = lookup_submission_by_rsl_num(ctx=obj.ctx, rsl_num=new_run['rsl_plate_num'])
-            # sub = lookup_submissions(ctx=obj.ctx, rsl_number=new_run['rsl_plate_num'])
-            sub = BasicSubmission.query(rsl_number=new_run['rsl_plate_num'])
+            # sub = BasicSubmission.query(rsl_number=new_run['rsl_plate_num'])
+            sub = BasicSubmission.query(rsl_plate_num=new_run['rsl_plate_num'])
             # If no such submission exists, move onto the next run
             if sub == None:
                 continue