Moments before disaster.

This commit is contained in:
lwark
2025-01-16 08:36:15 -06:00
parent 5cded949ed
commit bf711369c6
21 changed files with 541 additions and 368 deletions

View File

@@ -20,11 +20,11 @@ def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
if ctx.database_schema == "sqlite":
execution_phrase = "PRAGMA foreign_keys=ON"
print(f"Executing '{execution_phrase}' in sql.")
else:
# print("Nothing to execute, returning")
cursor.close()
return
print(f"Executing '{execution_phrase}' in sql.")
cursor.execute(execution_phrase)
cursor.close()
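For context, this hook follows the standard SQLAlchemy recipe for enabling foreign-key enforcement per SQLite connection; a minimal sketch (the event registration shown here is an assumption, not visible in this diff):

from sqlalchemy import event
from sqlalchemy.engine import Engine

@event.listens_for(Engine, "connect")
def _enable_sqlite_foreign_keys(dbapi_connection, connection_record):
    cursor = dbapi_connection.cursor()
    # SQLite ignores FOREIGN KEY constraints unless this pragma is set on each connection.
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()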
@@ -33,6 +33,17 @@ from .models import *
def update_log(mapper, connection, target):
"""
Updates log table whenever an object with LogMixin is updated.
Args:
mapper (Mapper): SQLAlchemy mapper that is the target of this event.
connection (Connection): Connection in use for the current flush.
target (LogMixin): The mapped instance being updated.
Returns:
None
"""
state = inspect(target)
object_name = state.object.truncated_name
update = dict(user=getuser(), time=datetime.now(), object=object_name, changes=[])
@@ -43,6 +54,7 @@ def update_log(mapper, connection, target):
if attr.key == "custom":
continue
added = [str(item) for item in hist.added]
# NOTE: Attributes left out to save space
if attr.key in ['artic_technician', 'submission_sample_associations', 'submission_reagent_associations',
'submission_equipment_associations', 'submission_tips_associations', 'contact_id', 'gel_info',
'gel_controls', 'source_plates']:
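The attribute history read above comes from SQLAlchemy's instance inspection API; a minimal, self-contained sketch of that pattern (the model and event registration are illustrative assumptions):

from sqlalchemy import event, inspect

def collect_changes(mapper, connection, target):
    changes = []
    state = inspect(target)
    for attr in state.attrs:
        hist = attr.history
        if not hist.has_changes():
            continue
        # hist.added / hist.deleted hold the new and old values for this attribute.
        changes.append({attr.key: [str(item) for item in hist.added]})
    return changes

# Assumed wiring; the real listener registration is not shown in this diff:
# event.listen(SomeModel, "after_update", collect_changes)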

View File

@@ -175,7 +175,7 @@ class BaseClass(Base):
try:
records = [obj.to_sub_dict(**kwargs) for obj in objects]
except AttributeError:
records = [obj.to_omnigui_dict() for obj in objects]
records = [obj.omnigui_dict for obj in objects]
return DataFrame.from_records(records)
@classmethod
@@ -241,7 +241,8 @@ class BaseClass(Base):
report.add_result(Result(msg=e, status="Critical"))
return report
def to_omnigui_dict(self) -> dict:
@property
def omnigui_dict(self) -> dict:
"""
For getting any object in an output format friendly to the Omni add/edit widgets.
@@ -255,8 +256,8 @@ class BaseClass(Base):
pass
return dicto
@classmethod
def get_pydantic_model(cls) -> BaseModel:
@classproperty
def pydantic_model(cls) -> BaseModel:
"""
Gets the pydantic model corresponding to this object.
@@ -271,7 +272,7 @@ class BaseClass(Base):
return model
@classproperty
def add_edit_tooltips(self) -> dict:
def add_edit_tooltips(cls) -> dict:
"""
Gets tooltips for Omni-add-edit
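Much of this commit converts get_*() classmethods into classproperty attributes. Whether the project defines its own classproperty helper in tools or reuses one such as sqlalchemy.util.classproperty is not visible here; a minimal sketch of how such a descriptor typically behaves:

class classproperty:
    """Descriptor that exposes a classmethod-like callable as a class-level attribute."""

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, owner=None):
        # Works when accessed on the class or on an instance; always bind to the class.
        return self.fget(owner if owner is not None else type(obj))

class Example:
    @classproperty
    def pydantic_model(cls):
        return f"model for {cls.__name__}"

print(Example.pydantic_model)  # "model for Example" -- no call parentheses needed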

View File

@@ -81,7 +81,8 @@ class ControlType(BaseClass):
subtypes = sorted(list(jsoner[genera].keys()), reverse=True)
return subtypes
def get_instance_class(self) -> Control:
@property
def instance_class(self) -> Control:
"""
Retrieves the Control class associated with this controltype
@@ -314,7 +315,7 @@ class PCRControl(Control):
def to_sub_dict(self) -> dict:
"""
Creates dictionary of fields for this object
Creates dictionary of fields for this object.
Returns:
dict: Output dict of name, ct, subtype, target, reagent_lot and submitted_date
@@ -471,8 +472,8 @@ class IridaControl(Control):
_dict[key] = data[genus][key]
yield _dict
@classmethod
def get_modes(cls) -> List[str]:
@classproperty
def modes(cls) -> List[str]:
"""
Get all control modes from database

View File

@@ -9,7 +9,7 @@ from sqlalchemy.orm import relationship, validates, Query
from sqlalchemy.ext.associationproxy import association_proxy
from datetime import date, datetime, timedelta
from tools import check_authorization, setup_lookup, Report, Result, check_regex_match, yaml_regex_creator, timezone
from typing import List, Literal, Generator, Any
from typing import List, Literal, Generator, Any, Tuple
from pandas import ExcelFile
from pathlib import Path
from . import Base, BaseClass, Organization, LogMixin
@@ -157,30 +157,62 @@ class KitType(BaseClass):
else:
return (item.reagent_role for item in relevant_associations)
def construct_xl_map_for_use(self, submission_type: str | SubmissionType) -> Generator[(str, str), None, None]:
def construct_xl_map_for_use(self, submission_type: str | SubmissionType) -> Tuple[dict|None, KitType]:
"""
Creates map of locations in Excel workbook for a SubmissionType
Args:
submission_type (str | SubmissionType): SubmissionType instance or its name.
Returns:
Tuple[dict | None, KitType]: Map of reagent role names to their Excel locations (None if no kit could be resolved), and the kit actually used.
"""
new_kit = self
# NOTE: Account for submission_type variable type.
match submission_type:
case str():
assocs = [item for item in self.kit_reagentrole_associations if
item.submission_type.name == submission_type]
# assocs = [item for item in self.kit_reagentrole_associations if
# item.submission_type.name == submission_type]
logger.debug(f"Query for {submission_type}")
submission_type = SubmissionType.query(name=submission_type)
case SubmissionType():
assocs = [item for item in self.kit_reagentrole_associations if item.submission_type == submission_type]
pass
case _:
raise ValueError(f"Wrong variable type: {type(submission_type)} used!")
for assoc in assocs:
try:
yield assoc.reagent_role.name, assoc.uses
except TypeError:
continue
logger.debug(f"Submission type: {submission_type}, Kit: {self}")
assocs = [item for item in self.kit_reagentrole_associations if item.submission_type == submission_type]
logger.debug(f"Associations: {assocs}")
# NOTE: rescue with submission type's default kit.
if not assocs:
logger.error(
f"No associations found with {self}. Attempting rescue with default kit: {submission_type.default_kit}")
new_kit = submission_type.default_kit
if not new_kit:
from frontend.widgets.pop_ups import ObjectSelector
dlg = ObjectSelector(
title="Select Kit",
message="Could not find reagents for this submission type/kit type combo.\nSelect new kit.",
obj_type=self.__class__,
values=[kit.name for kit in submission_type.kit_types]
)
if dlg.exec():
dlg_result = dlg.parse_form()
logger.debug(f"Dialog result: {dlg_result}")
new_kit = self.__class__.query(name=dlg_result)
logger.debug(f"Query result: {new_kit}")
# return new_kit.construct_xl_map_for_use(submission_type=submission_type)
else:
return None, new_kit
assocs = [item for item in new_kit.kit_reagentrole_associations if item.submission_type == submission_type]
# for assoc in assocs:
# try:
# yield assoc.reagent_role.name, assoc.uses
# except TypeError:
# continue
output = {assoc.reagent_role.name: assoc.uses for assoc in assocs}
logger.debug(f"Output: {output}")
return output, new_kit
@classmethod
@setup_lookup
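Since construct_xl_map_for_use now returns a (map, kit) tuple instead of yielding pairs, callers unpack it and may receive a rescue/default kit; an illustrative usage sketch (variable names are placeholders, not repo code):

xl_map, kit = extraction_kit.construct_xl_map_for_use(submission_type)
if xl_map is None:
    # The kit-selection dialog was cancelled; nothing to map.
    ...
else:
    for role_name, uses in xl_map.items():
        ...  # place reagent info at the workbook locations described by `uses`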
@@ -444,7 +476,7 @@ class Reagent(BaseClass, LogMixin):
Concrete reagent instance
"""
searchables = ["lot"]
searchables = [dict(label="Lot", field="lot")]
id = Column(INTEGER, primary_key=True) #: primary key
role = relationship("ReagentRole", back_populates="instances",
@@ -548,7 +580,9 @@ class Reagent(BaseClass, LogMixin):
def query_or_create(cls, **kwargs) -> Reagent:
from backend.validators.pydant import PydReagent
new = False
instance = cls.query(**kwargs)
disallowed = ['expiry']
sanitized_kwargs = {k:v for k,v in kwargs.items() if k not in disallowed}
instance = cls.query(**sanitized_kwargs)
if not instance or isinstance(instance, list):
if "role" not in kwargs:
try:
@@ -557,7 +591,7 @@ class Reagent(BaseClass, LogMixin):
pass
instance = PydReagent(**kwargs)
new = True
instance, _ = instance.toSQL()
instance = instance.to_sql()
logger.info(f"Instance from query or create: {instance}")
return instance, new
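The lookup above drops 'expiry' before querying, presumably so that an existing reagent with a different recorded expiry still matches, while the full kwargs (expiry included) are kept for building the PydReagent when nothing is found. A tiny illustration with made-up values:

from datetime import date

kwargs = {"lot": "A123", "role": "Lysis Buffer", "expiry": date(2025, 6, 1)}  # made-up values
disallowed = ["expiry"]
sanitized_kwargs = {k: v for k, v in kwargs.items() if k not in disallowed}
print(sanitized_kwargs)  # {'lot': 'A123', 'role': 'Lysis Buffer'}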
@@ -644,38 +678,15 @@ class Reagent(BaseClass, LogMixin):
except AttributeError as e:
logger.error(f"Could not set {key} due to {e}")
@check_authorization
def edit_from_search(self, obj, **kwargs):
from frontend.widgets.omni_add_edit import AddEdit
role = ReagentRole.query(kwargs['role'])
if role:
role_name = role.name
else:
role_name = None
# dlg = AddReagentForm(reagent_lot=self.lot, reagent_role=role_name, expiry=self.expiry, reagent_name=self.name)
dlg = AddEdit(parent=None, instance=self)
if dlg.exec():
pyd = dlg.parse_form()
for field in pyd.model_fields:
self.set_attribute(field, pyd.__getattribute__(field))
# for key, value in vars.items():
# match key:
# case "expiry":
# if isinstance(value, str):
# field_value = datetime.strptime(value, "%Y-%m-%d")
# elif isinstance(value, date):
# field_value = datetime.combine(value, datetime.max.time())
# else:
# field_value = value
# field_value.replace(tzinfo=timezone)
# case "role":
# continue
# case _:
# field_value = value
# self.__setattr__(key, field_value)
self.save()
# print(self.__dict__)
@classproperty
def add_edit_tooltips(self):
@@ -801,8 +812,8 @@ class SubmissionType(BaseClass):
"""
return f"<SubmissionType({self.name})>"
@classmethod
def retrieve_template_file(cls) -> bytes:
@classproperty
def basic_template(cls) -> bytes:
"""
Grabs the default excel template file.
@@ -812,7 +823,8 @@ class SubmissionType(BaseClass):
submission_type = cls.query(name="Bacterial Culture")
return submission_type.template_file
def get_template_file_sheets(self) -> List[str]:
@property
def template_file_sheets(self) -> List[str]:
"""
Gets names of sheets in the stored blank form.
@@ -870,15 +882,6 @@ class SubmissionType(BaseClass):
output['custom'] = self.info_map['custom']
return output
def construct_sample_map(self) -> dict:
"""
Returns sample map
Returns:
dict: sample location map
"""
return self.sample_map
def construct_field_map(self, field: Literal['equipment', 'tip']) -> Generator[(str, dict), None, None]:
"""
Make a map of all locations for tips or equipment.
@@ -895,7 +898,8 @@ class SubmissionType(BaseClass):
fmap = {}
yield getattr(item, f"{field}_role").name, fmap
def get_default_kit(self) -> KitType | None:
@property
def default_kit(self) -> KitType | None:
"""
If only one kit exists for this Submission Type, return it.
@@ -941,7 +945,8 @@ class SubmissionType(BaseClass):
raise TypeError(f"Type {type(equipment_role)} is not allowed")
return list(set([item for items in relevant for item in items if item is not None]))
def get_submission_class(self) -> "BasicSubmission":
@property
def submission_class(self) -> "BasicSubmission":
"""
Gets submission class associated with this submission type.
@@ -993,7 +998,8 @@ class SubmissionType(BaseClass):
base_dict = dict(name=self.name)
base_dict['info'] = self.construct_info_map(mode='export')
base_dict['defaults'] = self.defaults
base_dict['samples'] = self.construct_sample_map()
# base_dict['samples'] = self.construct_sample_map()
base_dict['samples'] = self.sample_map
base_dict['kits'] = [item.to_export_dict() for item in self.submissiontype_kit_associations]
return base_dict
@@ -1413,7 +1419,8 @@ class Equipment(BaseClass, LogMixin):
return {k: v for k, v in self.__dict__.items()}
def get_processes(self, submission_type: str | SubmissionType | None = None,
extraction_kit: str | KitType | None = None) -> List[str]:
extraction_kit: str | KitType | None = None,
equipment_role: str | EquipmentRole | None=None) -> List[str]:
"""
Get all processes associated with this Equipment for a given SubmissionType
@@ -1433,6 +1440,8 @@ class Equipment(BaseClass, LogMixin):
continue
if extraction_kit and extraction_kit not in process.kit_types:
continue
if equipment_role and equipment_role not in process.equipment_roles:
continue
yield process
@classmethod
@@ -1489,12 +1498,12 @@ class Equipment(BaseClass, LogMixin):
PydEquipment: pydantic equipment object
"""
from backend.validators.pydant import PydEquipment
processes = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit)
processes = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit, equipment_role=role)
return PydEquipment(processes=processes, role=role,
**self.to_dict(processes=False))
@classmethod
def get_regex(cls) -> re.Pattern:
@classproperty
def manufacturer_regex(cls) -> re.Pattern:
"""
Creates regex to determine tip manufacturer
@@ -1809,6 +1818,9 @@ class Process(BaseClass):
def query(cls,
name: str | None = None,
id: int | None = None,
submission_type: str | SubmissionType | None = None,
extraction_kit : str | KitType | None = None,
equipment_role: str | KitType | None = None,
limit: int = 0) -> Process | List[Process]:
"""
Lookup Processes
@@ -1822,6 +1834,30 @@ class Process(BaseClass):
Process|List[Process]: Process(es) matching criteria
"""
query = cls.__database_session__.query(cls)
match submission_type:
case str():
submission_type = SubmissionType.query(name=submission_type)
query = query.filter(cls.submission_types.contains(submission_type))
case SubmissionType():
query = query.filter(cls.submission_types.contains(submission_type))
case _:
pass
match extraction_kit:
case str():
extraction_kit = KitType.query(name=extraction_kit)
query = query.filter(cls.kit_types.contains(extraction_kit))
case KitType():
query = query.filter(cls.kit_types.contains(extraction_kit))
case _:
pass
match equipment_role:
case str():
equipment_role = EquipmentRole.query(name=equipment_role)
query = query.filter(cls.equipment_roles.contains(equipment_role))
case EquipmentRole():
query = query.filter(cls.equipment_roles.contains(equipment_role))
case _:
pass
match name:
case str():
query = query.filter(cls.name == name)
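Process.query now accepts submission_type, extraction_kit and equipment_role filters, each given as either a name or an instance; an illustrative call (the argument values are made up):

processes = Process.query(submission_type="Wastewater",
                          extraction_kit="Example Extraction Kit",
                          equipment_role="Extraction Robot",
                          limit=0)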
@@ -1975,6 +2011,14 @@ class SubmissionTipsAssociation(BaseClass):
query = query.filter(cls.role_name == role)
return cls.execute_query(query=query, limit=limit, **kwargs)
@classmethod
def query_or_create(cls, tips, submission, role: str, **kwargs):
instance = cls.query(tip_id=tips.id, role=role, submission_id=submission.id, limit=1, **kwargs)
if instance is None:
instance = SubmissionTipsAssociation(submission=submission, tips=tips, role_name=role)
return instance
def to_pydantic(self):
from backend.validators import PydTips
return PydTips(name=self.tips.name, lot=self.tips.lot, role=self.role_name)

View File

@@ -124,7 +124,7 @@ class Contact(BaseClass):
Base of Contact
"""
searchables =[]
searchables = []
id = Column(INTEGER, primary_key=True) #: primary key
name = Column(String(64)) #: contact name

View File

@@ -2,8 +2,6 @@
Models for the main submission and sample types.
"""
from __future__ import annotations
from collections import OrderedDict
from copy import deepcopy
from getpass import getuser
import logging, uuid, tempfile, re, base64, numpy as np, pandas as pd, types, sys
@@ -12,7 +10,7 @@ from zipfile import ZipFile, BadZipfile
from tempfile import TemporaryDirectory, TemporaryFile
from operator import itemgetter
from pprint import pformat
from . import BaseClass, Reagent, SubmissionType, KitType, Organization, Contact, LogMixin
from . import BaseClass, Reagent, SubmissionType, KitType, Organization, Contact, LogMixin, SubmissionReagentAssociation
from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, JSON, FLOAT, case, func
from sqlalchemy.orm import relationship, validates, Query
from sqlalchemy.orm.attributes import flag_modified
@@ -25,13 +23,15 @@ from openpyxl.drawing.image import Image as OpenpyxlImage
from tools import row_map, setup_lookup, jinja_template_loading, rreplace, row_keys, check_key_or_attr, Result, Report, \
report_result, create_holidays_for_year
from datetime import datetime, date, timedelta
from typing import List, Any, Tuple, Literal, Generator
from typing import List, Any, Tuple, Literal, Generator, Type
from dateutil.parser import parse
from pathlib import Path
from jinja2.exceptions import TemplateNotFound
from jinja2 import Template
from PIL import Image
logger = logging.getLogger(f"submissions.{__name__}")
@@ -126,7 +126,7 @@ class BasicSubmission(BaseClass, LogMixin):
def __repr__(self) -> str:
return f"<Submission({self.rsl_plate_num})>"
@classmethod
@classproperty
def jsons(cls) -> List[str]:
"""
Get list of JSON db columns
@@ -136,10 +136,10 @@ class BasicSubmission(BaseClass, LogMixin):
"""
output = [item.name for item in cls.__table__.columns if isinstance(item.type, JSON)]
if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
output += BasicSubmission.jsons()
output += BasicSubmission.jsons
return output
@classmethod
@classproperty
def timestamps(cls) -> List[str]:
"""
Get list of TIMESTAMP columns
@@ -149,7 +149,7 @@ class BasicSubmission(BaseClass, LogMixin):
"""
output = [item.name for item in cls.__table__.columns if isinstance(item.type, TIMESTAMP)]
if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
output += BasicSubmission.timestamps()
output += BasicSubmission.timestamps
return output
@classmethod
@@ -259,7 +259,8 @@ class BasicSubmission(BaseClass, LogMixin):
Returns:
dict: sample location map
"""
return cls.get_submission_type(submission_type).construct_sample_map()
# return cls.get_submission_type(submission_type).construct_sample_map()
return cls.get_submission_type(submission_type).sample_map
def generate_associations(self, name: str, extra: str | None = None):
try:
@@ -277,6 +278,7 @@ class BasicSubmission(BaseClass, LogMixin):
Constructs dictionary used in submissions summary
Args:
report (bool, optional): indicates if to be used for a report. Defaults to False.
full_data (bool, optional): indicates if sample dicts to be constructed. Defaults to False.
backup (bool, optional): passed to adjust_to_dict_samples. Defaults to False.
@@ -323,7 +325,8 @@ class BasicSubmission(BaseClass, LogMixin):
logger.error(f"We got an error retrieving reagents: {e}")
reagents = []
finally:
for k, v in self.extraction_kit.construct_xl_map_for_use(self.submission_type):
dicto, _ = self.extraction_kit.construct_xl_map_for_use(self.submission_type)
for k, v in dicto.items():
if k == 'info':
continue
if not any([item['role'] == k for item in reagents]):
@@ -381,7 +384,8 @@ class BasicSubmission(BaseClass, LogMixin):
output["completed_date"] = self.completed_date
return output
def calculate_column_count(self) -> int:
@property
def column_count(self) -> int:
"""
Calculate the number of columns in this submission
@@ -391,13 +395,14 @@ class BasicSubmission(BaseClass, LogMixin):
columns = set([assoc.column for assoc in self.submission_sample_associations])
return len(columns)
def calculate_base_cost(self):
def calculate_base_cost(self) -> None:
"""
Calculates cost of the plate
"""
# NOTE: Calculate number of columns based on largest column number
try:
cols_count_96 = self.calculate_column_count()
cols_count_96 = self.column_count
except Exception as e:
logger.error(f"Column count error: {e}")
# NOTE: Get kit associated with this submission
@@ -418,14 +423,15 @@ class BasicSubmission(BaseClass, LogMixin):
logger.error(f"Calculation error: {e}")
self.run_cost = round(self.run_cost, 2)
def hitpick_plate(self) -> list:
@property
def hitpicked(self) -> list:
"""
Returns positive sample locations for the plate
Returns:
list: list of hitpick dictionaries for each sample
"""
output_list = [assoc.to_hitpick() for assoc in self.submission_sample_associations]
output_list = [assoc.hitpicked for assoc in self.submission_sample_associations]
return output_list
@classmethod
@@ -454,7 +460,8 @@ class BasicSubmission(BaseClass, LogMixin):
html = template.render(samples=output_samples, PLATE_ROWS=plate_rows, PLATE_COLUMNS=plate_columns)
return html + "<br/>"
def get_used_equipment(self) -> List[str]:
@property
def used_equipment(self) -> Generator[str, None, None]:
"""
Gets EquipmentRole names associated with this BasicSubmission
@@ -490,6 +497,7 @@ class BasicSubmission(BaseClass, LogMixin):
'source_plates', 'pcr_technician', 'ext_technician', 'artic_technician', 'cost_centre',
'signed_by', 'artic_date', 'gel_barcode', 'gel_date', 'ngs_date', 'contact_phone', 'contact',
'tips', 'gel_image_path', 'custom']
# NOTE: keep only the dataframe columns that are not in exclude
df = df.loc[:, ~df.columns.isin(exclude)]
if chronologic:
try:
@@ -531,7 +539,7 @@ class BasicSubmission(BaseClass, LogMixin):
field_value = value
case "ctx" | "csv" | "filepath" | "equipment" | "controls":
return
case item if item in self.jsons():
case item if item in self.jsons:
match key:
case "custom" | "source_plates":
existing = value
@@ -549,7 +557,7 @@ class BasicSubmission(BaseClass, LogMixin):
if isinstance(value, list):
existing += value
else:
if value is not None:
if value:
existing.append(value)
self.__setattr__(key, existing)
# NOTE: Make sure this gets updated by telling SQLAlchemy it's been modified.
@@ -636,12 +644,6 @@ class BasicSubmission(BaseClass, LogMixin):
field_value = [item.to_pydantic() for item in self.submission_tips_associations]
case "submission_type":
field_value = dict(value=self.__getattribute__(key).name, missing=missing)
# case "contact":
# try:
# field_value = dict(value=self.__getattribute__(key).name, missing=missing)
# except AttributeError:
# contact = self.submitting_lab.contacts[0]
# field_value = dict(value=contact.name, missing=True)
case "plate_number":
key = 'rsl_plate_num'
field_value = dict(value=self.rsl_plate_num, missing=missing)
@@ -677,7 +679,7 @@ class BasicSubmission(BaseClass, LogMixin):
return super().save()
@classmethod
def get_regex(cls, submission_type: SubmissionType | str | None = None) -> str:
def get_regex(cls, submission_type: SubmissionType | str | None = None) -> re.Pattern:
"""
Gets the regex string for identifying a certain class of submission.
@@ -685,18 +687,26 @@ class BasicSubmission(BaseClass, LogMixin):
submission_type (SubmissionType | str | None, optional): submission type of interest. Defaults to None.
Returns:
str: _description_
re.Pattern: Compiled regex for identifying this class of submission.
"""
# logger.debug(f"Class for regex: {cls}")
try:
return cls.get_submission_type(submission_type).defaults['regex']
regex = cls.get_submission_type(submission_type).defaults['regex']
except AttributeError as e:
logger.error(f"Couldn't get submission type for {cls.__mapper_args__['polymorphic_identity']}")
return ""
regex = None
try:
regex = re.compile(rf"{regex}", flags=re.IGNORECASE | re.VERBOSE)
except re.error as e:
regex = cls.construct_regex()
# logger.debug(f"Returning regex: {regex}")
return regex
# NOTE: Polymorphic functions
@classmethod
def construct_regex(cls) -> re.Pattern:
@classproperty
def regex(cls) -> re.Pattern:
"""
Constructs catchall regex.
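get_regex now compiles the stored default pattern with IGNORECASE and VERBOSE and falls back to the catchall regex on a compile error; a small self-contained illustration of VERBOSE-mode compilation (the pattern below is a made-up example, not the stored one):

import re

pattern = r"""
    RSL-?            # site prefix with optional hyphen
    (?P<year>\d{2})  # two-digit year
"""
regex = re.compile(rf"{pattern}", flags=re.IGNORECASE | re.VERBOSE)
print(bool(regex.search("rsl24")))  # True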
@@ -762,7 +772,9 @@ class BasicSubmission(BaseClass, LogMixin):
"""
input_dict['custom'] = {}
for k, v in custom_fields.items():
logger.debug(f"Custom info parser getting type: {v['type']}")
match v['type']:
# NOTE: 'exempt' type not currently used
case "exempt":
continue
case "cell":
@@ -796,7 +808,7 @@ class BasicSubmission(BaseClass, LogMixin):
@classmethod
def custom_validation(cls, pyd: "PydSubmission") -> "PydSubmission":
"""
Performs any final custom parsing of the excel file.
Performs any final parsing of the pydantic object that only needs to be done for this cls.
Args:
pyd (PydSubmission): Parsed pydantic submission up to this point.
@@ -849,6 +861,14 @@ class BasicSubmission(BaseClass, LogMixin):
@classmethod
def custom_sample_writer(self, sample: dict) -> dict:
"""
Performs any final alterations to sample writing unique to this submission type.
Args:
sample (dict): Dictionary of sample values.
Returns:
dict: Finalized dictionary.
"""
return sample
@classmethod
@@ -884,7 +904,7 @@ class BasicSubmission(BaseClass, LogMixin):
logger.error(f"Error making outstr: {e}, sending to RSLNamer to make new plate name.")
outstr = RSLNamer.construct_new_plate_name(data=data)
try:
# NOTE: Grab plate number
# NOTE: Grab plate number as the digit after a '-' or '_' that is not followed by another digit
plate_number = re.search(r"(?:(-|_)\d)(?!\d)", outstr).group().strip("_").strip("-")
except AttributeError as e:
plate_number = "1"
@@ -910,7 +930,7 @@ class BasicSubmission(BaseClass, LogMixin):
Args:
xl (pd.DataFrame): pcr info form
rsl_plate_number (str): rsl plate num of interest
rsl_plate_num (str): rsl plate num of interest
Returns:
Generator[dict, None, None]: Updated samples
@@ -943,16 +963,16 @@ class BasicSubmission(BaseClass, LogMixin):
submission = cls.query(rsl_plate_num=rsl_plate_num)
name_column = 1
for item in location_map:
logger.debug(f"Checking {item}")
# logger.debug(f"Checking {item}")
worksheet = xl[item['sheet']]
for iii, row in enumerate(worksheet.iter_rows(max_row=len(worksheet['A']), max_col=name_column), start=1):
logger.debug(f"Checking row {row}, {iii}")
# logger.debug(f"Checking row {row}, {iii}")
for cell in row:
logger.debug(f"Checking cell: {cell}, with value {cell.value} against {item['name']}")
# logger.debug(f"Checking cell: {cell}, with value {cell.value} against {item['name']}")
if cell.value == item['name']:
subtype, _ = item['name'].split("-")
target = item['target']
logger.debug(f"Subtype: {subtype}, target: {target}")
# logger.debug(f"Subtype: {subtype}, target: {target}")
ct = worksheet.cell(row=iii, column=item['ct_column']).value
# NOTE: Kind of a stop gap solution to find control reagents.
if subtype == "PC":
@@ -966,7 +986,7 @@ class BasicSubmission(BaseClass, LogMixin):
assoc.reagent.role])), None)
else:
ctrl = None
logger.debug(f"Control reagent: {ctrl.__dict__}")
# logger.debug(f"Control reagent: {ctrl.__dict__}")
try:
ct = float(ct)
except ValueError:
@@ -982,7 +1002,7 @@ class BasicSubmission(BaseClass, LogMixin):
target=target,
reagent_lot=ctrl
)
logger.debug(f"Control output: {pformat(output)}")
# logger.debug(f"Control output: {pformat(output)}")
yield output
@classmethod
@@ -1010,7 +1030,7 @@ class BasicSubmission(BaseClass, LogMixin):
return samples
@classmethod
def get_details_template(cls, base_dict: dict) -> Template:
def get_details_template(cls, base_dict: dict) -> Tuple[dict, Template]:
"""
Get the details jinja template for the correct class
@@ -1040,8 +1060,8 @@ class BasicSubmission(BaseClass, LogMixin):
submission_type_name: str | None = None,
id: int | str | None = None,
rsl_plate_num: str | None = None,
start_date: date | str | int | None = None,
end_date: date | str | int | None = None,
start_date: date | datetime | str | int | None = None,
end_date: date | datetime | str | int | None = None,
reagent: Reagent | str | None = None,
chronologic: bool = False,
limit: int = 0,
@@ -1065,6 +1085,7 @@ class BasicSubmission(BaseClass, LogMixin):
Returns:
models.BasicSubmission | List[models.BasicSubmission]: Submission(s) of interest
"""
from ... import SubmissionReagentAssociation
# NOTE: if you go back to using 'model' change the appropriate cls to model in the query filters
if submission_type is not None:
model = cls.find_polymorphic_subclass(polymorphic_identity=submission_type)
@@ -1078,41 +1099,48 @@ class BasicSubmission(BaseClass, LogMixin):
logger.warning(f"Start date with no end date, using today.")
end_date = date.today()
if end_date is not None and start_date is None:
logger.warning(f"End date with no start date, using Jan 1, 2023")
# NOTE: this query returns a tuple of (object, datetime), need to get only datetime.
start_date = cls.__database_session__.query(cls, func.min(cls.submitted_date)).first()[1]
logger.warning(f"End date with no start date, using first submission date: {start_date}")
if start_date is not None:
match start_date:
case date() | datetime():
start_date = start_date.strftime("%Y-%m-%d")
case date():
pass
case datetime():
start_date = start_date.date()
case int():
start_date = datetime.fromordinal(
datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d")
datetime(1900, 1, 1).toordinal() + start_date - 2).date()
case _:
start_date = parse(start_date).strftime("%Y-%m-%d")
start_date = parse(start_date).date()
# start_date = start_date.strftime("%Y-%m-%d")
match end_date:
case date() | datetime():
end_date = end_date + timedelta(days=1)
end_date = end_date.strftime("%Y-%m-%d")
case date():
pass
case datetime():
end_date = end_date# + timedelta(days=1)
# pass
case int():
end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date() \
+ timedelta(days=1)
end_date = end_date.strftime("%Y-%m-%d")
end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date()# \
# + timedelta(days=1)
case _:
end_date = parse(end_date) + timedelta(days=1)
end_date = end_date.strftime("%Y-%m-%d")
if start_date == end_date:
start_date = datetime.strptime(start_date, "%Y-%m-%d").strftime("%Y-%m-%d %H:%M:%S.%f")
query = query.filter(model.submitted_date == start_date)
else:
query = query.filter(model.submitted_date.between(start_date, end_date))
end_date = parse(end_date).date()# + timedelta(days=1)
# end_date = end_date.strftime("%Y-%m-%d")
start_date = datetime.combine(start_date, datetime.min.time()).strftime("%Y-%m-%d %H:%M:%S.%f")
end_date = datetime.combine(end_date, datetime.max.time()).strftime("%Y-%m-%d %H:%M:%S.%f")
# if start_date == end_date:
# start_date = start_date.strftime("%Y-%m-%d %H:%M:%S.%f")
# query = query.filter(model.submitted_date == start_date)
# else:
query = query.filter(model.submitted_date.between(start_date, end_date))
# NOTE: by reagent (for some reason)
match reagent:
case str():
query = query.join(model.submission_reagent_associations).filter(
SubmissionSampleAssociation.reagent.lot == reagent)
query = query.join(SubmissionReagentAssociation).join(Reagent).filter(
Reagent.lot == reagent)
case Reagent():
query = query.join(model.submission_reagent_associations).join(
SubmissionSampleAssociation.reagent).filter(Reagent.lot == reagent)
query = query.join(SubmissionReagentAssociation).filter(
SubmissionReagentAssociation.reagent == reagent)
case _:
pass
# NOTE: by rsl number (returns only a single value)
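The int() branches above treat integer dates as Excel serial numbers; a standalone illustration of that conversion (the serial value is made up; the -2 offset covers Excel's 1-based count plus its fictitious 1900-02-29):

from datetime import datetime

serial = 45658  # made-up Excel serial number
converted = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + serial - 2).date()
print(converted)  # 2025-01-01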
@@ -1217,6 +1245,7 @@ class BasicSubmission(BaseClass, LogMixin):
msg = QuestionAsker(title="Delete?", message=f"Are you sure you want to delete {self.rsl_plate_num}?\n")
if msg.exec():
try:
# NOTE: backs up file as xlsx, same as export.
self.backup(fname=fname, full_backup=True)
except BadZipfile:
logger.error("Couldn't open zipfile for writing.")
@@ -1285,16 +1314,16 @@ class BasicSubmission(BaseClass, LogMixin):
if dlg.exec():
equipment = dlg.parse_form()
for equip in equipment:
_, assoc = equip.toSQL(submission=self)
_, assoc = equip.to_sql(submission=self)
try:
assoc.save()
except AttributeError as e:
logger.error(f"Couldn't save association with {equip} due to {e}")
if equip.tips:
for tips in equip.tips:
logger.debug(f"Attempting to add tips assoc: {tips} (pydantic)")
# logger.debug(f"Attempting to add tips assoc: {tips} (pydantic)")
tassoc = tips.to_sql(submission=self)
logger.debug(f"Attempting to add tips assoc: {tips.__dict__} (sql)")
# logger.debug(f"Attempting to add tips assoc: {tips.__dict__} (sql)")
if tassoc not in self.submission_tips_associations:
tassoc.save()
else:
@@ -1320,7 +1349,8 @@ class BasicSubmission(BaseClass, LogMixin):
writer = pyd.to_writer()
writer.xl.save(filename=fname.with_suffix(".xlsx"))
def get_turnaround_time(self) -> Tuple[int | None, bool | None]:
@property
def turnaround_time(self) -> int | None:
try:
completed = self.completed_date.date()
except AttributeError:
@@ -1328,25 +1358,24 @@ class BasicSubmission(BaseClass, LogMixin):
return self.calculate_turnaround(start_date=self.submitted_date.date(), end_date=completed)
@classmethod
def calculate_turnaround(cls, start_date: date | None = None, end_date: date | None = None) -> Tuple[
int | None, bool | None]:
if 'pytest' not in sys.modules:
from tools import ctx
else:
from test_settings import ctx
def calculate_turnaround(cls, start_date: date | None = None, end_date: date | None = None) -> int | None:
"""
Calculates number of business days between data submitted and date completed
Args:
start_date (date, optional): Date submitted. defaults to None.
end_date (date, optional): Date completed. defaults to None.
Returns:
int | None: Number of business days, or None if there is no end date.
"""
if not end_date:
return None, None
return None
try:
delta = np.busday_count(start_date, end_date, holidays=create_holidays_for_year(start_date.year)) + 1
except ValueError:
return None, None
try:
tat = cls.get_default_info("turnaround_time")
except (AttributeError, KeyError):
tat = None
if not tat:
tat = ctx.TaT_threshold
return delta, delta <= tat
return None
return delta
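calculate_turnaround counts inclusive business days with numpy while skipping holidays; a small worked example (dates and the holiday list are made-up stand-ins for create_holidays_for_year):

import numpy as np
from datetime import date

start, end = date(2025, 1, 2), date(2025, 1, 10)
holidays = [date(2025, 1, 6)]  # stand-in for create_holidays_for_year(start.year)
# busday_count excludes the end date, so +1 makes the span inclusive.
delta = int(np.busday_count(start, end, holidays=holidays)) + 1
print(delta)  # 6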
# NOTE: Below are the custom submission types
@@ -1385,7 +1414,7 @@ class BacterialCulture(BasicSubmission):
return template
@classmethod
def custom_validation(cls, pyd) -> dict:
def custom_validation(cls, pyd) -> "PydSubmission":
"""
Extends parent. Currently finds control sample and adds to reagents.
@@ -1395,7 +1424,7 @@ class BacterialCulture(BasicSubmission):
pyd (PydSubmission): Pydantic submission to validate.
Returns:
dict: Updated dictionary.
PydSubmission: Updated pydantic.
"""
from . import ControlType
pyd = super().custom_validation(pyd)
@@ -1549,9 +1578,10 @@ class Wastewater(BasicSubmission):
"""
samples = [item for item in super().parse_pcr(xl=xl, rsl_plate_num=rsl_plate_num)]
# NOTE: Due to having to run through samples in a for loop we need to convert to a list.
# NOTE: Also, you can't change the size of a list while iterating it, so don't even think about it.
output = []
for sample in samples:
logger.debug(sample)
# logger.debug(sample)
# NOTE: remove '-{target}' from controls
sample['sample'] = re.sub('-N\\d*$', '', sample['sample'])
# NOTE: if sample is already in output skip
@@ -1559,7 +1589,7 @@ class Wastewater(BasicSubmission):
logger.warning(f"Already have {sample['sample']}")
continue
# NOTE: Set ct values
logger.debug(f"Sample ct: {sample['ct']}")
# logger.debug(f"Sample ct: {sample['ct']}")
sample[f"ct_{sample['target'].lower()}"] = sample['ct'] if isinstance(sample['ct'], float) else 0.0
# NOTE: Set assessment
logger.debug(f"Sample assessemnt: {sample['assessment']}")
@@ -1578,7 +1608,7 @@ class Wastewater(BasicSubmission):
except KeyError:
pass
output.append(sample)
# NOTE: And then convert back to list ot keep fidelity with parent method.
# NOTE: And then convert back to list to keep fidelity with parent method.
for sample in output:
yield sample
@@ -1644,7 +1674,7 @@ class Wastewater(BasicSubmission):
return events
@report_result
def link_pcr(self, obj):
def link_pcr(self, obj) -> Report:
"""
PYQT6 function to add PCR info to this submission
@@ -1660,7 +1690,8 @@ class Wastewater(BasicSubmission):
report.add_result(Result(msg="No file selected, cancelling.", status="Warning"))
return report
parser = PCRParser(filepath=fname, submission=self)
self.set_attribute("pcr_info", parser.pcr)
self.set_attribute("pcr_info", parser.pcr_info)
# NOTE: These are generators here, need to expand.
pcr_samples = [sample for sample in parser.samples]
pcr_controls = [control for control in parser.controls]
self.save(original=False)
@@ -1674,19 +1705,19 @@ class Wastewater(BasicSubmission):
result = assoc.save()
report.add_result(result)
controltype = ControlType.query(name="PCR Control")
submitted_date = datetime.strptime(" ".join(parser.pcr['run_start_date/time'].split(" ")[:-1]),
submitted_date = datetime.strptime(" ".join(parser.pcr_info['run_start_date/time'].split(" ")[:-1]),
"%Y-%m-%d %I:%M:%S %p")
for control in pcr_controls:
logger.debug(f"Control coming into save: {control}")
# logger.debug(f"Control coming into save: {control}")
new_control = PCRControl(**control)
new_control.submitted_date = submitted_date
new_control.controltype = controltype
new_control.submission = self
logger.debug(f"Control coming into save: {new_control.__dict__}")
# logger.debug(f"Control coming into save: {new_control.__dict__}")
new_control.save()
return report
def update_subsampassoc(self, sample: BasicSample, input_dict: dict):
def update_subsampassoc(self, sample: BasicSample, input_dict: dict) -> SubmissionSampleAssociation:
"""
Updates a joined submission sample association by assigning ct values to n1 or n2 based on alphabetical sorting.
@@ -1722,7 +1753,7 @@ class WastewaterArtic(BasicSubmission):
artic_date = Column(TIMESTAMP) #: Date Artic Performed
ngs_date = Column(TIMESTAMP) #: Date submission received
gel_date = Column(TIMESTAMP) #: Date submission received
gel_barcode = Column(String(16))
gel_barcode = Column(String(16)) #: Identifier for the used gel.
__mapper_args__ = dict(polymorphic_identity="Wastewater Artic",
polymorphic_load="inline",
@@ -1769,6 +1800,16 @@ class WastewaterArtic(BasicSubmission):
from openpyxl_image_loader.sheet_image_loader import SheetImageLoader
def scrape_image(wb: Workbook, info_dict: dict) -> Image or None:
"""
Pulls image from excel workbook
Args:
wb (Workbook): Workbook of interest.
info_dict (dict): Location map.
Returns:
Image or None: Image of interest.
"""
ws = wb[info_dict['sheet']]
img_loader = SheetImageLoader(ws)
for ii in range(info_dict['start_row'], info_dict['end_row'] + 1):
@@ -1805,7 +1846,7 @@ class WastewaterArtic(BasicSubmission):
if datum['plate'] in ["None", None, ""]:
continue
else:
datum['plate'] = RSLNamer(filename=datum['plate'], sub_type="Wastewater").parsed_name
datum['plate'] = RSLNamer(filename=datum['plate'], submission_type="Wastewater").parsed_name
if xl is not None:
try:
input_dict['csv'] = xl["hitpicks_csv_to_export"]
@@ -1864,6 +1905,7 @@ class WastewaterArtic(BasicSubmission):
Returns:
str: Updated name.
"""
logger.debug(f"Incoming String: {instr}")
try:
# NOTE: Deal with PCR file.
instr = re.sub(r"Artic", "", instr, flags=re.IGNORECASE)
@@ -1900,8 +1942,7 @@ class WastewaterArtic(BasicSubmission):
input_dict['source_plate_number'] = int(input_dict['source_plate_number'])
except (ValueError, KeyError):
input_dict['source_plate_number'] = 0
# NOTE: Because generate_sample_object needs the submitter_id and the artic has the "({origin well})"
# at the end, this has to be done here. No moving to sqlalchemy object :(
# NOTE: Because generate_sample_object needs the submitter_id and the artic has the "({origin well})" at the end, this has to be done here. No moving to sqlalchemy object :(
input_dict['submitter_id'] = re.sub(r"\s\(.+\)\s?$", "", str(input_dict['submitter_id'])).strip()
try:
input_dict['ww_processing_num'] = input_dict['sample_name_(lims)']
@@ -1988,7 +2029,11 @@ class WastewaterArtic(BasicSubmission):
except AttributeError:
plate_num = "1"
plate_num = plate_num.strip("-")
repeat_num = re.search(r"R(?P<repeat>\d)?$", "PBS20240426-2R").groups()[0]
# repeat_num = re.search(r"R(?P<repeat>\d)?$", "PBS20240426-2R").groups()[0]
try:
repeat_num = re.search(r"R(?P<repeat>\d)?$", processed).groups()[0]
except AttributeError:
repeat_num = None
if repeat_num is None and "R" in plate_num:
repeat_num = "1"
plate_num = re.sub(r"R", rf"R{repeat_num}", plate_num)
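For reference, the hard-coded test string removed above shows why the optional repeat digit needs fallback handling: a trailing 'R' with no digit yields None, which the preceding lines then promote to repeat "1".

import re

match = re.search(r"R(?P<repeat>\d)?$", "PBS20240426-2R")
print(match.groups()[0])  # None -- the 'R' matched but no repeat digit followed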
@@ -2192,7 +2237,7 @@ class BasicSample(BaseClass, LogMixin):
Base of basic sample which polymorphs into BCSample and WWSample
"""
searchables = ['submitter_id']
searchables = [dict(label="Submitter ID", field="submitter_id")]
id = Column(INTEGER, primary_key=True) #: primary key
submitter_id = Column(String(64), nullable=False, unique=True) #: identification from submitter
@@ -2242,7 +2287,7 @@ class BasicSample(BaseClass, LogMixin):
except AttributeError:
return f"<Sample({self.submitter_id})"
@classmethod
@classproperty
def timestamps(cls) -> List[str]:
"""
Constructs a list of all attributes stored as SQL Timestamps
@@ -2252,7 +2297,7 @@ class BasicSample(BaseClass, LogMixin):
"""
output = [item.name for item in cls.__table__.columns if isinstance(item.type, TIMESTAMP)]
if issubclass(cls, BasicSample) and not cls.__name__ == "BasicSample":
output += BasicSample.timestamps()
output += BasicSample.timestamps
return output
def to_sub_dict(self, full_data: bool = False) -> dict:
@@ -2293,7 +2338,7 @@ class BasicSample(BaseClass, LogMixin):
@classmethod
def find_polymorphic_subclass(cls, polymorphic_identity: str | None = None,
attrs: dict | None = None) -> BasicSample:
attrs: dict | None = None) -> Type[BasicSample]:
"""
Retrieves subclasses of BasicSample based on type name.
@@ -2340,8 +2385,8 @@ class BasicSample(BaseClass, LogMixin):
"""
return input_dict
@classmethod
def get_details_template(cls) -> Template:
@classproperty
def details_template(cls) -> Template:
"""
Get the details jinja template for the correct class
@@ -2458,15 +2503,15 @@ class BasicSample(BaseClass, LogMixin):
def delete(self):
raise AttributeError(f"Delete not implemented for {self.__class__}")
@classmethod
def get_searchables(cls) -> List[dict]:
"""
Delivers a list of fields that can be used in fuzzy search.
Returns:
List[str]: List of fields.
"""
return [dict(label="Submitter ID", field="submitter_id")]
# @classmethod
# def get_searchables(cls) -> List[dict]:
# """
# Delivers a list of fields that can be used in fuzzy search.
#
# Returns:
# List[str]: List of fields.
# """
# return [dict(label="Submitter ID", field="submitter_id")]
@classmethod
def samples_to_df(cls, sample_list: List[BasicSample], **kwargs) -> pd.DataFrame:
@@ -2504,6 +2549,16 @@ class BasicSample(BaseClass, LogMixin):
pass
def edit_from_search(self, obj, **kwargs):
"""
Function called from search. What "Edit" does depends on the subclass; this one just shows details.
Args:
obj (__type__): Parent widget.
**kwargs (): Required for all edit from search functions.
Returns:
"""
self.show_details(obj)
@@ -2514,7 +2569,7 @@ class WastewaterSample(BasicSample):
Derivative wastewater sample
"""
searchables = BasicSample.searchables + ['ww_processing_num', 'ww_full_sample_id', 'rsl_number']
# searchables = BasicSample.searchables + ['ww_processing_num', 'ww_full_sample_id', 'rsl_number']
id = Column(INTEGER, ForeignKey('_basicsample.id'), primary_key=True)
ww_processing_num = Column(String(64)) #: wastewater processing number
@@ -2594,15 +2649,15 @@ class WastewaterSample(BasicSample):
# logger.debug(pformat(output_dict, indent=4))
return output_dict
@classmethod
def get_searchables(cls) -> List[str]:
@classproperty
def searchables(cls) -> List[dict]:
"""
Delivers a list of fields that can be used in fuzzy search. Extends parent.
Returns:
List[dict]: List of label/field dicts.
"""
searchables = super().get_searchables()
searchables = super().searchables
for item in ["ww_processing_num", "ww_full_sample_id", "rsl_number"]:
label = item.strip("ww_").replace("_", " ").replace("rsl", "RSL").title()
searchables.append(dict(label=label, field=item))
@@ -2726,7 +2781,8 @@ class SubmissionSampleAssociation(BaseClass):
from backend.validators import PydSample
return PydSample(**self.to_sub_dict())
def to_hitpick(self) -> dict | None:
@property
def hitpicked(self) -> dict | None:
"""
Outputs a dictionary usable for html plate maps.
@@ -2948,14 +3004,15 @@ class WastewaterAssociation(SubmissionSampleAssociation):
logger.error(f"Couldn't check positives for {self.sample.rsl_number}. Looks like there isn't PCR data.")
return sample
def to_hitpick(self) -> dict | None:
@property
def hitpicked(self) -> dict | None:
"""
Outputs a dictionary usable for html plate maps. Extends parent
Returns:
dict: dictionary of sample id, row and column in elution plate
"""
sample = super().to_hitpick()
sample = super().hitpicked
try:
scaler = max([self.ct_n1, self.ct_n2])
except TypeError: