Split Concentration controls on the chart so they are individually selectable.

This commit is contained in:
lwark
2025-04-11 12:54:27 -05:00
parent 96f178c09f
commit ae6717bc77
19 changed files with 380 additions and 457 deletions

View File

@@ -1,3 +1,7 @@
# 202504.03
- Split Concentration controls on the chart so they are individually selectable.
# 202504.02
- Added cscscience gitlab remote.

Binary file not shown.

View File

@@ -11,9 +11,7 @@ from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.exc import ArgumentError
from typing import Any, List
from pathlib import Path
from sqlalchemy.orm.relationships import _RelationshipDeclared
from tools import report_result, list_sort_dict
# NOTE: Load testing environment
@@ -48,7 +46,7 @@ class BaseClass(Base):
"""
__abstract__ = True #: NOTE: Will not be added to DB as a table
__table_args__ = {'extend_existing': True} #: Will only add new columns
__table_args__ = {'extend_existing': True} #: NOTE Will only add new columns
singles = ['id']
omni_removes = ["id", 'submissions', "omnigui_class_dict", "omnigui_instance_dict"]
@@ -308,7 +306,6 @@ class BaseClass(Base):
dicto = {'id': dicto.pop('id'), **dicto}
except KeyError:
pass
# logger.debug(f"{self.__class__.__name__} omnigui dict:\n\n{pformat(dicto)}")
return dicto
@classproperty
@@ -337,11 +334,6 @@ class BaseClass(Base):
"""
return dict()
@classmethod
def relevant_relationships(cls, relationship_instance):
    """
    Look up instances of this class linked to the given related object.

    Args:
        relationship_instance: A related model instance; its ``query_alias``
            attribute names the keyword that ``cls.query`` expects for
            filtering by this relationship.

    Returns:
        Result of ``cls.query`` filtered by the relationship instance.
    """
    # NOTE: query_alias maps the related object onto the kwarg name cls.query understands.
    query_kwargs = {relationship_instance.query_alias: relationship_instance}
    return cls.query(**query_kwargs)
def check_all_attributes(self, attributes: dict) -> bool:
"""
Checks this instance against a dictionary of attributes to determine if they are a match.
@@ -352,14 +344,14 @@ class BaseClass(Base):
Returns:
bool: If a single unequivocal value is found will be false, else true.
"""
logger.debug(f"Incoming attributes: {attributes}")
# logger.debug(f"Incoming attributes: {attributes}")
for key, value in attributes.items():
if value.lower() == "none":
value = None
logger.debug(f"Attempting to grab attribute: {key}")
# logger.debug(f"Attempting to grab attribute: {key}")
self_value = getattr(self, key)
class_attr = getattr(self.__class__, key)
logger.debug(f"Self value: {self_value}, class attr: {class_attr} of type: {type(class_attr)}")
# logger.debug(f"Self value: {self_value}, class attr: {class_attr} of type: {type(class_attr)}")
if isinstance(class_attr, property):
filter = "property"
else:
@@ -379,7 +371,7 @@ class BaseClass(Base):
case "property":
pass
case _RelationshipDeclared():
logger.debug(f"Checking {self_value}")
# logger.debug(f"Checking {self_value}")
try:
self_value = self_value.name
except AttributeError:
@@ -387,19 +379,18 @@ class BaseClass(Base):
if class_attr.property.uselist:
self_value = self_value.__str__()
try:
logger.debug(f"Check if {self_value.__class__} is subclass of {self.__class__}")
# logger.debug(f"Check if {self_value.__class__} is subclass of {self.__class__}")
check = issubclass(self_value.__class__, self.__class__)
except TypeError as e:
logger.error(f"Couldn't check if {self_value.__class__} is subclass of {self.__class__} due to {e}")
check = False
if check:
logger.debug(f"Checking for subclass name.")
# logger.debug(f"Checking for subclass name.")
self_value = self_value.name
logger.debug(
f"Checking self_value {self_value} of type {type(self_value)} against attribute {value} of type {type(value)}")
# logger.debug(f"Checking self_value {self_value} of type {type(self_value)} against attribute {value} of type {type(value)}")
if self_value != value:
output = False
logger.debug(f"Value {key} is False, returning.")
# logger.debug(f"Value {key} is False, returning.")
return output
return True
@@ -444,7 +435,6 @@ class BaseClass(Base):
value = value[0]
else:
raise ValueError("Object is too long to parse a single value.")
# value = value
return super().__setattr__(key, value)
case _:
return super().__setattr__(key, value)
@@ -454,6 +444,32 @@ class BaseClass(Base):
def delete(self):
    """
    Placeholder delete; subclasses are expected to override.

    Logs an error instead of removing the instance — deletion is not
    implemented on the base class.
    """
    logger.error(f"Delete has not been implemented for {self.__class__.__name__}")
def rectify_query_date(input_date, eod: bool = False) -> str:
"""
Converts input into a datetime string for querying purposes
Args:
eod (bool, optional): Whether to use max time to indicate end of day.
input_date ():
Returns:
datetime: properly formated datetime
"""
match input_date:
case datetime() | date():
output_date = input_date#.strftime("%Y-%m-%d %H:%M:%S")
case int():
output_date = datetime.fromordinal(
datetime(1900, 1, 1).toordinal() + input_date - 2)#.date().strftime("%Y-%m-%d %H:%M:%S")
case _:
output_date = parse(input_date)#.strftime("%Y-%m-%d %H:%M:%S")
if eod:
addition_time = datetime.max.time()
else:
addition_time = datetime.min.time()
output_date = datetime.combine(output_date, addition_time).strftime("%Y-%m-%d %H:%M:%S")
return output_date
class ConfigItem(BaseClass):
"""

View File

@@ -2,7 +2,6 @@
All control related models.
"""
from __future__ import annotations
import itertools
from pprint import pformat
from PyQt6.QtWidgets import QWidget, QCheckBox, QLabel
@@ -13,10 +12,9 @@ import logging, re
from operator import itemgetter
from . import BaseClass
from tools import setup_lookup, report_result, Result, Report, Settings, get_unique_values_in_df_column, super_splitter, \
rectify_query_date
flatten_list, timer
from datetime import date, datetime, timedelta
from typing import List, Literal, Tuple, Generator
from dateutil.parser import parse
from re import Pattern
logger = logging.getLogger(f"submissions.{__name__}")
@@ -31,9 +29,6 @@ class ControlType(BaseClass):
targets = Column(JSON) #: organisms checked for
instances = relationship("Control", back_populates="controltype") #: control samples created of this type.
# def __repr__(self) -> str:
# return f"<ControlType({self.name})>"
@classmethod
@setup_lookup
def query(cls,
@@ -113,6 +108,7 @@ class ControlType(BaseClass):
Pattern: Constructed pattern
"""
strings = list(set([super_splitter(item, "-", 0) for item in cls.get_positive_control_types(control_type)]))
# NOTE: This will build a string like ^(ATCC49226|MCS)-.*
return re.compile(rf"(^{'|^'.join(strings)})-.*", flags=re.IGNORECASE)
@@ -159,7 +155,7 @@ class Control(BaseClass):
Lookup control objects in the database based on a number of parameters.
Args:
submission_type (str | None, optional): Submission type associated with control. Defaults to None.
submissiontype (str | None, optional): Submission type associated with control. Defaults to None.
subtype (str | None, optional): Control subtype, eg IridaControl. Defaults to None.
start_date (date | str | int | None, optional): Beginning date to search by. Defaults to 2023-01-01 if end_date not None.
end_date (date | str | int | None, optional): End date to search by. Defaults to today if start_date not None.
@@ -202,30 +198,8 @@ class Control(BaseClass):
logger.warning(f"End date with no start date, using 90 days ago.")
start_date = date.today() - timedelta(days=90)
if start_date is not None:
# match start_date:
# case datetime():
# start_date = start_date.strftime("%Y-%m-%d %H:%M:%S")
# case date():
# start_date = datetime.combine(start_date, datetime.min.time())
# start_date = start_date.strftime("%Y-%m-%d %H:%M:%S")
# case int():
# start_date = datetime.fromordinal(
# datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d %H:%M:%S")
# case _:
# start_date = parse(start_date).strftime("%Y-%m-%d %H:%M:%S")
start_date = rectify_query_date(start_date)
end_date = rectify_query_date(end_date, eod=True)
# match end_date:
# case datetime():
# end_date = end_date.strftime("%Y-%m-%d %H:%M:%S")
# case date():
# end_date = datetime.combine(end_date, datetime.max.time())
# end_date = end_date.strftime("%Y-%m-%d %H:%M:%S")
# case int():
# end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date().strftime(
# "%Y-%m-%d %H:%M:%S")
# case _:
# end_date = parse(end_date).strftime("%Y-%m-%d %H:%M:%S")
start_date = cls.rectify_query_date(start_date)
end_date = cls.rectify_query_date(end_date, eod=True)
query = query.filter(cls.submitted_date.between(start_date, end_date))
match name:
case str():
@@ -372,7 +346,8 @@ class PCRControl(Control):
def to_pydantic(self):
from backend.validators import PydPCRControl
return PydPCRControl(**self.to_sub_dict(), controltype_name=self.controltype_name,
return PydPCRControl(**self.to_sub_dict(),
controltype_name=self.controltype_name,
submission_id=self.submission_id)
@@ -565,7 +540,8 @@ class IridaControl(Control):
consolidate=consolidate) for
control in controls]
# NOTE: flatten data to one dimensional list
data = [item for sublist in data for item in sublist]
# data = [item for sublist in data for item in sublist]
data = flatten_list(data)
if not data:
report.add_result(Result(status="Critical", msg="No data found for controls in given date range."))
return report, None
@@ -731,11 +707,11 @@ class IridaControl(Control):
Returns:
DataFrame: dataframe with originals removed in favour of repeats.
"""
if 'rerun_regex' in ctx:
if 'rerun_regex' in ctx.model_extra:
sample_names = get_unique_values_in_df_column(df, column_name="name")
rerun_regex = re.compile(fr"{ctx.rerun_regex}")
exclude = [re.sub(rerun_regex, "", sample) for sample in sample_names if rerun_regex.search(sample)]
df = df[df.name not in exclude]
df = df[~df.name.isin(exclude)]
return df
def to_pydantic(self) -> "PydIridaControl":

View File

@@ -2,8 +2,7 @@
All kit and reagent related models
"""
from __future__ import annotations
import json, zipfile, yaml, logging, re
import sys
import json, zipfile, yaml, logging, re, sys
from pprint import pformat
from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT, BLOB
from sqlalchemy.orm import relationship, validates, Query
@@ -11,7 +10,7 @@ from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from datetime import date, datetime, timedelta
from tools import check_authorization, setup_lookup, Report, Result, check_regex_match, yaml_regex_creator, timezone
from typing import List, Literal, Generator, Any, Tuple, Dict, AnyStr
from typing import List, Literal, Generator, Any, Tuple
from pandas import ExcelFile
from pathlib import Path
from . import Base, BaseClass, Organization, LogMixin
@@ -136,18 +135,18 @@ class KitType(BaseClass):
return self.used_for
def get_reagents(self,
required: bool = False,
required_only: bool = False,
submission_type: str | SubmissionType | None = None
) -> Generator[ReagentRole, None, None]:
"""
Return ReagentTypes linked to kit through KitTypeReagentTypeAssociation.
Args:
required (bool, optional): If true only return required types. Defaults to False.
required_only (bool, optional): If true only return required types. Defaults to False.
submission_type (str | SubmissionType | None, optional): Submission type to narrow results. Defaults to None.
Returns:
Generator[ReagentRole, None, None]: List of reagents linked to this kit.
Generator[ReagentRole, None, None]: List of reagent roles linked to this kit.
"""
match submission_type:
case SubmissionType():
@@ -158,7 +157,7 @@ class KitType(BaseClass):
item.submission_type.name == submission_type]
case _:
relevant_associations = [item for item in self.kit_reagentrole_associations]
if required:
if required_only:
return (item.reagent_role for item in relevant_associations if item.required == 1)
else:
return (item.reagent_role for item in relevant_associations)
@@ -168,7 +167,6 @@ class KitType(BaseClass):
Creates map of locations in Excel workbook for a SubmissionType
Args:
new_kit ():
submission_type (str | SubmissionType): Submissiontype.name
Returns:
@@ -240,7 +238,7 @@ class KitType(BaseClass):
Args:
name (str, optional): Name of desired kit (returns single instance). Defaults to None.
used_for (str | Submissiontype | None, optional): Submission type the kit is used for. Defaults to None.
submissiontype (str | Submissiontype | None, optional): Submission type the kit is used for. Defaults to None.
id (int | None, optional): Kit id in the database. Defaults to None.
limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0.
@@ -276,108 +274,108 @@ class KitType(BaseClass):
def save(self):
super().save()
def to_export_dict(self, submission_type: SubmissionType) -> dict:
"""
Creates dictionary for exporting to yml used in new SubmissionType Construction
# def to_export_dict(self, submission_type: SubmissionType) -> dict:
# """
# Creates dictionary for exporting to yml used in new SubmissionType Construction
#
# Args:
# submission_type (SubmissionType): SubmissionType of interest.
#
# Returns:
# dict: Dictionary containing relevant info for SubmissionType construction
# """
# base_dict = dict(name=self.name, reagent_roles=[], equipment_roles=[])
# for key, value in self.construct_xl_map_for_use(submission_type=submission_type):
# try:
# assoc = next(item for item in self.kit_reagentrole_associations if item.reagent_role.name == key)
# except StopIteration as e:
# continue
# for kk, vv in assoc.to_export_dict().items():
# value[kk] = vv
# base_dict['reagent_roles'].append(value)
# for key, value in submission_type.construct_field_map("equipment"):
# try:
# assoc = next(item for item in submission_type.submissiontype_equipmentrole_associations if
# item.equipment_role.name == key)
# except StopIteration:
# continue
# for kk, vv in assoc.to_export_dict(extraction_kit=self).items():
# value[kk] = vv
# base_dict['equipment_roles'].append(value)
# return base_dict
Args:
submission_type (SubmissionType): SubmissionType of interest.
Returns:
dict: Dictionary containing relevant info for SubmissionType construction
"""
base_dict = dict(name=self.name, reagent_roles=[], equipment_roles=[])
for key, value in self.construct_xl_map_for_use(submission_type=submission_type):
try:
assoc = next(item for item in self.kit_reagentrole_associations if item.reagent_role.name == key)
except StopIteration as e:
continue
for kk, vv in assoc.to_export_dict().items():
value[kk] = vv
base_dict['reagent_roles'].append(value)
for key, value in submission_type.construct_field_map("equipment"):
try:
assoc = next(item for item in submission_type.submissiontype_equipmentrole_associations if
item.equipment_role.name == key)
except StopIteration:
continue
for kk, vv in assoc.to_export_dict(extraction_kit=self).items():
value[kk] = vv
base_dict['equipment_roles'].append(value)
return base_dict
@classmethod
def import_from_yml(cls, submission_type: str | SubmissionType, filepath: Path | str | None = None,
                    import_dict: dict | None = None) -> KitType:
    """
    Construct a KitType (with reagent/equipment roles, associations and
    processes) from an exported yml/json dictionary.

    Args:
        submission_type (str | SubmissionType): Submission type the kit belongs to.
        filepath (Path | str | None, optional): File to load the export dict from.
            Defaults to None (use import_dict directly).
        import_dict (dict | None, optional): Pre-loaded export dictionary. Defaults to None.

    Returns:
        KitType: The newly constructed (or pre-existing) kit, or None if the
            given filepath does not exist.
    """
    if isinstance(submission_type, str):
        submission_type = SubmissionType.query(name=submission_type)
    if filepath:
        # NOTE: !regex tags in the yml require a custom constructor.
        yaml.add_constructor("!regex", yaml_regex_creator)
        if isinstance(filepath, str):
            filepath = Path(filepath)
        if not filepath.exists():
            logging.critical(f"Given file could not be found.")
            return None
        with open(filepath, "r") as f:
            if filepath.suffix == ".json":
                import_dict = json.load(fp=f)
            elif filepath.suffix == ".yml":
                import_dict = yaml.load(stream=f, Loader=yaml.Loader)
            else:
                raise Exception(f"Filetype {filepath.suffix} not supported.")
    new_kit = KitType.query(name=import_dict['kit_type']['name'])
    if not new_kit:
        new_kit = KitType(name=import_dict['kit_type']['name'])
    for role in import_dict['kit_type']['reagent_roles']:
        new_role = ReagentRole.query(name=role['role'])
        if new_role:
            # NOTE: interactive console confirmation before reusing an existing role.
            check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
            if check.lower() == "n":
                new_role = None
            else:
                pass
        if not new_role:
            eol = timedelta(role['extension_of_life'])
            new_role = ReagentRole(name=role['role'], eol_ext=eol)
        uses = dict(expiry=role['expiry'], lot=role['lot'], name=role['name'], sheet=role['sheet'])
        ktrr_assoc = KitTypeReagentRoleAssociation(kit_type=new_kit, reagent_role=new_role, uses=uses)
        ktrr_assoc.submission_type = submission_type
        ktrr_assoc.required = role['required']
    ktst_assoc = SubmissionTypeKitTypeAssociation(
        kit_type=new_kit,
        submission_type=submission_type,
        mutable_cost_sample=import_dict['mutable_cost_sample'],
        mutable_cost_column=import_dict['mutable_cost_column'],
        constant_cost=import_dict['constant_cost']
    )
    for role in import_dict['kit_type']['equipment_roles']:
        new_role = EquipmentRole.query(name=role['role'])
        if new_role:
            check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
            if check.lower() == "n":
                new_role = None
            else:
                pass
        if not new_role:
            new_role = EquipmentRole(name=role['role'])
            # NOTE(review): equipment assignment appears to apply only to newly
            # created roles — confirm nesting against original file.
            for equipment in Equipment.assign_equipment(equipment_role=new_role):
                new_role.instances.append(equipment)
        ster_assoc = SubmissionTypeEquipmentRoleAssociation(submission_type=submission_type,
                                                           equipment_role=new_role)
        try:
            uses = dict(name=role['name'], process=role['process'], sheet=role['sheet'],
                        static=role['static'])
        except KeyError:
            # NOTE: roles without layout info get no uses map.
            uses = None
        ster_assoc.uses = uses
        for process in role['processes']:
            new_process = Process.query(name=process)
            if not new_process:
                new_process = Process(name=process)
            new_process.submission_types.append(submission_type)
            new_process.kit_types.append(new_kit)
            new_process.equipment_roles.append(new_role)
    return new_kit
# @classmethod
# def import_from_yml(cls, submission_type: str | SubmissionType, filepath: Path | str | None = None,
# import_dict: dict | None = None) -> KitType:
# if isinstance(submission_type, str):
# submission_type = SubmissionType.query(name=submission_type)
# if filepath:
# yaml.add_constructor("!regex", yaml_regex_creator)
# if isinstance(filepath, str):
# filepath = Path(filepath)
# if not filepath.exists():
# logging.critical(f"Given file could not be found.")
# return None
# with open(filepath, "r") as f:
# if filepath.suffix == ".json":
# import_dict = json.load(fp=f)
# elif filepath.suffix == ".yml":
# import_dict = yaml.load(stream=f, Loader=yaml.Loader)
# else:
# raise Exception(f"Filetype {filepath.suffix} not supported.")
# new_kit = KitType.query(name=import_dict['kit_type']['name'])
# if not new_kit:
# new_kit = KitType(name=import_dict['kit_type']['name'])
# for role in import_dict['kit_type']['reagent_roles']:
# new_role = ReagentRole.query(name=role['role'])
# if new_role:
# check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
# if check.lower() == "n":
# new_role = None
# else:
# pass
# if not new_role:
# eol = timedelta(role['extension_of_life'])
# new_role = ReagentRole(name=role['role'], eol_ext=eol)
# uses = dict(expiry=role['expiry'], lot=role['lot'], name=role['name'], sheet=role['sheet'])
# ktrr_assoc = KitTypeReagentRoleAssociation(kit_type=new_kit, reagent_role=new_role, uses=uses)
# ktrr_assoc.submission_type = submission_type
# ktrr_assoc.required = role['required']
# ktst_assoc = SubmissionTypeKitTypeAssociation(
# kit_type=new_kit,
# submission_type=submission_type,
# mutable_cost_sample=import_dict['mutable_cost_sample'],
# mutable_cost_column=import_dict['mutable_cost_column'],
# constant_cost=import_dict['constant_cost']
# )
# for role in import_dict['kit_type']['equipment_roles']:
# new_role = EquipmentRole.query(name=role['role'])
# if new_role:
# check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
# if check.lower() == "n":
# new_role = None
# else:
# pass
# if not new_role:
# new_role = EquipmentRole(name=role['role'])
# for equipment in Equipment.assign_equipment(equipment_role=new_role):
# new_role.instances.append(equipment)
# ster_assoc = SubmissionTypeEquipmentRoleAssociation(submission_type=submission_type,
# equipment_role=new_role)
# try:
# uses = dict(name=role['name'], process=role['process'], sheet=role['sheet'],
# static=role['static'])
# except KeyError:
# uses = None
# ster_assoc.uses = uses
# for process in role['processes']:
# new_process = Process.query(name=process)
# if not new_process:
# new_process = Process(name=process)
# new_process.submission_types.append(submission_type)
# new_process.kit_types.append(new_kit)
# new_process.equipment_roles.append(new_role)
# return new_kit
def to_omni(self, expand: bool = False) -> "OmniKitType":
from backend.validators.omni_gui_objects import OmniKitType
@@ -395,7 +393,7 @@ class KitType(BaseClass):
kit_reagentrole_associations=kit_reagentrole_associations,
kit_submissiontype_associations=kit_submissiontype_associations
)
logger.debug(f"Creating omni for {pformat(data)}")
# logger.debug(f"Creating omni for {pformat(data)}")
return OmniKitType(instance_object=self, **data)
@@ -405,7 +403,6 @@ class ReagentRole(BaseClass):
"""
skip_on_edit = False
id = Column(INTEGER, primary_key=True) #: primary key
name = Column(String(64)) #: name of role reagent plays
instances = relationship("Reagent", back_populates="role",
@@ -453,7 +450,7 @@ class ReagentRole(BaseClass):
Args:
id (id | None, optional): Id of the object. Defaults to None.
name (str | None, optional): Reagent type name. Defaults to None.
kit_type (KitType | str | None, optional): Kit the type of interest belongs to. Defaults to None.
kittype (KitType | str | None, optional): Kit the type of interest belongs to. Defaults to None.
reagent (Reagent | str | None, optional): Concrete instance of the type of interest. Defaults to None.
limit (int, optional): maximum number of results to return (0 = all). Defaults to 0.
@@ -507,14 +504,14 @@ class ReagentRole(BaseClass):
from backend.validators.pydant import PydReagent
return PydReagent(lot=None, role=self.name, name=self.name, expiry=date.today())
def to_export_dict(self) -> dict:
"""
Creates dictionary for exporting to yml used in new SubmissionType Construction
Returns:
dict: Dictionary containing relevant info for SubmissionType construction
"""
return dict(role=self.name, extension_of_life=self.eol_ext.days)
# def to_export_dict(self) -> dict:
# """
# Creates dictionary for exporting to yml used in new SubmissionType Construction
#
# Returns:
# dict: Dictionary containing relevant info for SubmissionType construction
# """
# return dict(role=self.name, extension_of_life=self.eol_ext.days)
@check_authorization
def save(self):
@@ -1278,20 +1275,20 @@ class SubmissionType(BaseClass):
pass
return cls.execute_query(query=query, limit=limit)
def to_export_dict(self):
"""
Creates dictionary for exporting to yml used in new SubmissionType Construction
Returns:
dict: Dictionary containing relevant info for SubmissionType construction
"""
base_dict = dict(name=self.name)
base_dict['info'] = self.construct_info_map(mode='export')
base_dict['defaults'] = self.defaults
# base_dict['samples'] = self.construct_sample_map()
base_dict['samples'] = self.sample_map
base_dict['kits'] = [item.to_export_dict() for item in self.submissiontype_kit_associations]
return base_dict
# def to_export_dict(self):
# """
# Creates dictionary for exporting to yml used in new SubmissionType Construction
#
# Returns:
# dict: Dictionary containing relevant info for SubmissionType construction
# """
# base_dict = dict(name=self.name)
# base_dict['info'] = self.construct_info_map(mode='export')
# base_dict['defaults'] = self.defaults
# # base_dict['samples'] = self.construct_sample_map()
# base_dict['samples'] = self.sample_map
# base_dict['kits'] = [item.to_export_dict() for item in self.submissiontype_kit_associations]
# return base_dict
@check_authorization
def save(self):
@@ -1499,17 +1496,17 @@ class SubmissionTypeKitTypeAssociation(BaseClass):
# limit = query.count()
return cls.execute_query(query=query, limit=limit)
def to_export_dict(self):
"""
Creates a dictionary of relevant values in this object.
Returns:
dict: dictionary of Association and related kittype
"""
exclude = ['_sa_instance_state', 'submission_types_id', 'kits_id', 'submission_type', 'kit_type']
base_dict = {k: v for k, v in self.__dict__.items() if k not in exclude}
base_dict['kit_type'] = self.kit_type.to_export_dict(submission_type=self.submission_type)
return base_dict
# def to_export_dict(self):
# """
# Creates a dictionary of relevant values in this object.
#
# Returns:
# dict: dictionary of Association and related kittype
# """
# exclude = ['_sa_instance_state', 'submission_types_id', 'kits_id', 'submission_type', 'kit_type']
# base_dict = {k: v for k, v in self.__dict__.items() if k not in exclude}
# base_dict['kit_type'] = self.kit_type.to_export_dict(submission_type=self.submission_type)
# return base_dict
def to_omni(self, expand: bool = False):
from backend.validators.omni_gui_objects import OmniSubmissionTypeKitTypeAssociation
@@ -1719,17 +1716,17 @@ class KitTypeReagentRoleAssociation(BaseClass):
limit = 1
return cls.execute_query(query=query, limit=limit)
def to_export_dict(self) -> dict:
"""
Creates a dictionary of relevant values in this object.
Returns:
dict: dictionary of Association and related reagent role
"""
base_dict = dict(required=self.required)
for k, v in self.reagent_role.to_export_dict().items():
base_dict[k] = v
return base_dict
# def to_export_dict(self) -> dict:
# """
# Creates a dictionary of relevant values in this object.
#
# Returns:
# dict: dictionary of Association and related reagent role
# """
# base_dict = dict(required=self.required)
# for k, v in self.reagent_role.to_export_dict().items():
# base_dict[k] = v
# return base_dict
def get_all_relevant_reagents(self) -> Generator[Reagent, None, None]:
"""
@@ -1915,13 +1912,6 @@ class Equipment(BaseClass, LogMixin):
submissions = association_proxy("equipment_submission_associations",
"submission") #: proxy to equipment_submission_associations.submission
# def __repr__(self) -> str:
# """
# Returns:
# str: representation of this Equipment
# """
# return f"<Equipment({self.name})>"
def to_dict(self, processes: bool = False) -> dict:
"""
This Equipment as a dictionary
@@ -2085,13 +2075,6 @@ class EquipmentRole(BaseClass):
submission_types = association_proxy("equipmentrole_submissiontype_associations",
"submission_type") #: proxy to equipmentrole_submissiontype_associations.submission_type
# def __repr__(self) -> str:
# """
# Returns:
# str: Representation of this EquipmentRole
# """
# return f"<EquipmentRole({self.name})>"
def to_dict(self) -> dict:
"""
This EquipmentRole as a dictionary
@@ -2192,16 +2175,6 @@ class EquipmentRole(BaseClass):
continue
yield process.name
def to_export_dict(self, submission_type: SubmissionType, kit_type: KitType):
    """
    Creates dictionary for exporting to yml used in new SubmissionType construction.

    Args:
        submission_type (SubmissionType): Submission type used to narrow the processes.
        kit_type (KitType): Extraction kit used to narrow the processes.

    Returns:
        dict: This equipment role's name and its relevant process names.
    """
    processes = self.get_processes(submission_type=submission_type, extraction_kit=kit_type)
    return dict(role=self.name, processes=[item for item in processes])
def to_omni(self, expand: bool = False) -> "OmniEquipmentRole":
    """
    Wraps this EquipmentRole in its omni-gui representation.

    Args:
        expand (bool, optional): Unused here; kept for interface parity with
            other to_omni implementations. Defaults to False.

    Returns:
        OmniEquipmentRole: GUI object carrying this instance and its name.
    """
    # NOTE: local import avoids a circular dependency with backend.validators.
    from backend.validators.omni_gui_objects import OmniEquipmentRole
    return OmniEquipmentRole(instance_object=self, name=self.name)
@@ -2320,23 +2293,6 @@ class SubmissionTypeEquipmentRoleAssociation(BaseClass):
def save(self):
super().save()
def to_export_dict(self, extraction_kit: KitType | str) -> dict:
    """
    Creates dictionary for exporting to yml used in new SubmissionType construction.

    Args:
        extraction_kit (KitType | str): KitType of interest (name or instance).

    Returns:
        dict: The equipment role's export dict plus this association's 'static' flag.
    """
    # NOTE: resolve a kit name into its KitType instance before delegating.
    if isinstance(extraction_kit, str):
        extraction_kit = KitType.query(name=extraction_kit)
    base_dict = {k: v for k, v in self.equipment_role.to_export_dict(submission_type=self.submission_type,
                                                                     kit_type=extraction_kit).items()}
    # NOTE: 'static' belongs to the association, not the role itself.
    base_dict['static'] = self.static
    return base_dict
class Process(BaseClass):
"""
@@ -2360,14 +2316,6 @@ class Process(BaseClass):
tip_roles = relationship("TipRole", back_populates='processes',
secondary=process_tiprole) #: relation to KitType
# def __repr__(self) -> str:
# """
# Returns:
# str: Representation of this Process
# """
# return f"<Process({self.name})>"
def set_attribute(self, key, value):
match key:
case "name":
@@ -2496,9 +2444,6 @@ class TipRole(BaseClass):
def tips(self):
return self.instances
# def __repr__(self):
# return f"<TipRole({self.name})>"
@classmethod
def query_or_create(cls, **kwargs) -> Tuple[TipRole, bool]:
new = False
@@ -2567,9 +2512,6 @@ class Tips(BaseClass, LogMixin):
def tiprole(self):
return self.role
# def __repr__(self):
# return f"<Tips({self.name})>"
@classmethod
def query_or_create(cls, **kwargs) -> Tuple[Tips, bool]:
new = False

View File

@@ -2,14 +2,14 @@
All client organization related models.
'''
from __future__ import annotations
import json, yaml, logging
import logging
from pathlib import Path
from pprint import pformat
from sqlalchemy import Column, String, INTEGER, ForeignKey, Table
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, Query
from . import Base, BaseClass
from tools import check_authorization, setup_lookup, yaml_regex_creator
from tools import check_authorization, setup_lookup
from typing import List, Tuple
logger = logging.getLogger(f"submissions.{__name__}")
@@ -41,9 +41,6 @@ class Organization(BaseClass):
def contact(self):
return self.contacts
# def __repr__(self) -> str:
# return f"<Organization({self.name})>"
@classmethod
@setup_lookup
def query(cls,
@@ -80,49 +77,6 @@ class Organization(BaseClass):
def save(self):
super().save()
@classmethod
@check_authorization
def import_from_yml(cls, filepath: Path | str):
    """
    Creates or updates Organizations (and their Contacts) from a yml/json file.

    Args:
        filepath (Path | str): Filepath of the yml or json export.

    Returns:
        None: Returns None early if the given file does not exist; otherwise
            saves each organization and returns nothing.
    """
    # NOTE: !regex tags in the yml require a custom constructor.
    yaml.add_constructor("!regex", yaml_regex_creator)
    if isinstance(filepath, str):
        filepath = Path(filepath)
    if not filepath.exists():
        logging.critical(f"Given file could not be found.")
        return None
    with open(filepath, "r") as f:
        if filepath.suffix == ".json":
            import_dict = json.load(fp=f)
        elif filepath.suffix == ".yml":
            import_dict = yaml.load(stream=f, Loader=yaml.Loader)
        else:
            raise Exception(f"Filetype {filepath.suffix} not supported.")
    data = import_dict['orgs']
    for org in data:
        organ = Organization.query(name=org['name'])
        if organ is None:
            organ = Organization(name=org['name'])
        try:
            organ.cost_centre = org['cost_centre']
        except KeyError:
            # NOTE: placeholder cost centre when none is supplied.
            organ.cost_centre = "xxx"
        for contact in org['contacts']:
            cont = Contact.query(name=contact['name'])
            if cont is None:
                cont = Contact()
            # NOTE: copy every key in the contact dict onto the instance verbatim.
            for k, v in contact.items():
                cont.__setattr__(k, v)
            organ.contacts.append(cont)
        organ.save()
def to_omni(self, expand: bool = False):
from backend.validators.omni_gui_objects import OmniOrganization
if self.cost_centre:
@@ -151,9 +105,6 @@ class Contact(BaseClass):
secondary=orgs_contacts) #: relationship to joined organization
submissions = relationship("BasicSubmission", back_populates="contact") #: submissions this contact has submitted
# def __repr__(self) -> str:
# return f"<Contact({self.name})>"
@classproperty
def searchables(cls):
return []

View File

@@ -13,7 +13,6 @@ from zipfile import ZipFile, BadZipfile
from tempfile import TemporaryDirectory, TemporaryFile
from operator import itemgetter
from pprint import pformat
from pandas import DataFrame
from sqlalchemy.ext.hybrid import hybrid_property
from . import BaseClass, Reagent, SubmissionType, KitType, Organization, Contact, LogMixin, SubmissionReagentAssociation
@@ -27,10 +26,9 @@ from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as S
from openpyxl import Workbook
from openpyxl.drawing.image import Image as OpenpyxlImage
from tools import row_map, setup_lookup, jinja_template_loading, rreplace, row_keys, check_key_or_attr, Result, Report, \
report_result, create_holidays_for_year, check_dictionary_inclusion_equality, rectify_query_date
from datetime import datetime, date, timedelta
report_result, create_holidays_for_year, check_dictionary_inclusion_equality
from datetime import datetime, date
from typing import List, Any, Tuple, Literal, Generator, Type
from dateutil.parser import parse
from pathlib import Path
from jinja2.exceptions import TemplateNotFound
from jinja2 import Template
@@ -271,7 +269,6 @@ class BasicSubmission(BaseClass, LogMixin):
Returns:
dict: sample location map
"""
# return cls.get_submission_type(submission_type).construct_sample_map()
return cls.get_submission_type(submission_type).sample_map
def generate_associations(self, name: str, extra: str | None = None):
@@ -445,11 +442,11 @@ class BasicSubmission(BaseClass, LogMixin):
except Exception as e:
logger.error(f"Column count error: {e}")
# NOTE: Get kit associated with this submission
logger.debug(f"Checking associations with submission type: {self.submission_type_name}")
# logger.debug(f"Checking associations with submission type: {self.submission_type_name}")
assoc = next((item for item in self.extraction_kit.kit_submissiontype_associations if
item.submission_type == self.submission_type),
None)
logger.debug(f"Got association: {assoc}")
# logger.debug(f"Got association: {assoc}")
# NOTE: If every individual cost is 0 this is probably an old plate.
if all(item == 0.0 for item in [assoc.constant_cost, assoc.mutable_cost_column, assoc.mutable_cost_sample]):
try:
@@ -635,16 +632,13 @@ class BasicSubmission(BaseClass, LogMixin):
# NOTE: No longer searches for association here, done in caller function
for k, v in input_dict.items():
try:
# logger.debug(f"Setting assoc {assoc} with key {k} to value {v}")
setattr(assoc, k, v)
# NOTE: for some reason I don't think assoc.__setattr__(k, v) works here.
except AttributeError:
# logger.error(f"Can't set {k} to {v}")
pass
return assoc
def update_reagentassoc(self, reagent: Reagent, role: str):
from backend.db import SubmissionReagentAssociation
# NOTE: get the first reagent assoc that fills the given role.
try:
assoc = next(item for item in self.submission_reagent_associations if
@@ -1134,7 +1128,7 @@ class BasicSubmission(BaseClass, LogMixin):
Returns:
models.BasicSubmission | List[models.BasicSubmission]: Submission(s) of interest
"""
from ... import SubmissionReagentAssociation
# from ... import SubmissionReagentAssociation
# NOTE: if you go back to using 'model' change the appropriate cls to model in the query filters
if submissiontype is not None:
model = cls.find_polymorphic_subclass(polymorphic_identity=submissiontype)
@@ -1181,8 +1175,8 @@ class BasicSubmission(BaseClass, LogMixin):
# # start_date = start_date.strftime("%Y-%m-%d %H:%M:%S.%f")
# # query = query.filter(model.submitted_date == start_date)
# # else:
start_date = rectify_query_date(start_date)
end_date = rectify_query_date(end_date, eod=True)
start_date = cls.rectify_query_date(start_date)
end_date = cls.rectify_query_date(end_date, eod=True)
query = query.filter(model.submitted_date.between(start_date, end_date))
# NOTE: by reagent (for some reason)
match reagent:
@@ -1575,19 +1569,40 @@ class BacterialCulture(BasicSubmission):
column=lookup_table['sample_columns']['concentration']).value
yield sample
def get_provisional_controls(self, controls_only: bool = True):
if controls_only:
# def get_provisional_controls(self, controls_only: bool = True):
def get_provisional_controls(self, include: List[str] = []):
# NOTE To ensure Samples are done last.
include = sorted(include)
logger.debug(include)
pos_str = "(ATCC)|(MCS)"
pos_regex = re.compile(rf"^{pos_str}")
neg_str = "(EN)"
neg_regex = re.compile(rf"^{neg_str}")
total_str = pos_str + "|" + neg_str
total_regex = re.compile(rf"^{total_str}")
output = []
for item in include:
# if self.controls:
# logger.debug(item)
match item:
case "Positive":
if self.controls:
provs = (control.sample for control in self.controls)
provs = (control.sample for control in self.controls if control.is_positive_control)
else:
regex = re.compile(r"^(ATCC)|(MCS)|(EN)")
provs = (sample for sample in self.samples if bool(regex.match(sample.submitter_id)))
provs = (sample for sample in self.samples if bool(pos_regex.match(sample.submitter_id)))
case "Negative":
if self.controls:
provs = (control.sample for control in self.controls if not control.is_positive_control)
else:
provs = self.samples
provs = (sample for sample in self.samples if bool(neg_regex.match(sample.submitter_id)))
case _:
provs = (sample for sample in self.samples if not sample.control and sample not in output)
for prov in provs:
# logger.debug(f"Prov: {prov}")
prov.submission = self.rsl_plate_num
prov.submitted_date = self.submitted_date
yield prov
output.append(prov)
return output
class Wastewater(BasicSubmission):
@@ -2794,8 +2809,7 @@ class WastewaterSample(BasicSample):
output_dict['rsl_number'] = "RSL-WW-" + output_dict['ww_processing_num']
if output_dict['ww_full_sample_id'] is not None and output_dict["submitter_id"] in disallowed:
output_dict["submitter_id"] = output_dict['ww_full_sample_id']
check = check_key_or_attr("rsl_number", output_dict, check_none=True)
# logger.debug(pformat(output_dict, indent=4))
# check = check_key_or_attr("rsl_number", output_dict, check_none=True)
return output_dict
@classproperty
@@ -3089,7 +3103,6 @@ class SubmissionSampleAssociation(BaseClass):
Returns:
SubmissionSampleAssociation: Queried or new association.
"""
# disallowed = ['id']
match submission:
case BasicSubmission():
pass
@@ -3184,7 +3197,6 @@ class WastewaterAssociation(SubmissionSampleAssociation):
sample['background_color'] = f"rgb({red}, {grn}, {blu})"
try:
sample[
# 'tooltip'] += f"<br>- ct N1: {'{:.2f}'.format(self.ct_n1)} ({self.n1_status})<br>- ct N2: {'{:.2f}'.format(self.ct_n2)} ({self.n2_status})"
'tooltip'] += f"<br>- ct N1: {'{:.2f}'.format(self.ct_n1)}<br>- ct N2: {'{:.2f}'.format(self.ct_n2)}"
except (TypeError, AttributeError) as e:
logger.error(f"Couldn't set tooltip for {self.sample.rsl_number}. Looks like there isn't PCR data.")

View File

@@ -259,7 +259,6 @@ class ReagentParser(object):
if isinstance(extraction_kit, dict):
extraction_kit = extraction_kit['value']
self.kit_object = KitType.query(name=extraction_kit)
# self.kit_map = self.kit_map(submission_type=submission_type)
self.xl = xl
@property

View File

@@ -1,17 +1,14 @@
"""
Contains functions for generating summary reports
"""
import itertools
import re
import sys
import re, sys, logging
from pprint import pformat
from pandas import DataFrame, ExcelWriter
import logging
from pathlib import Path
from datetime import date
from typing import Tuple
from backend.db.models import BasicSubmission, IridaControl
from tools import jinja_template_loading, get_first_blank_df_row, row_map
from typing import Tuple, List
from backend.db.models import BasicSubmission
from tools import jinja_template_loading, get_first_blank_df_row, row_map, flatten_list
from PyQt6.QtWidgets import QWidget
from openpyxl.worksheet.worksheet import Worksheet
@@ -198,14 +195,15 @@ class TurnaroundMaker(ReportArchetype):
class ConcentrationMaker(ReportArchetype):
def __init__(self, start_date: date, end_date: date, submission_type: str = "Bacterial Culture",
controls_only: bool = True):
# controls_only: bool = True):
include: List[str] = []):
self.start_date = start_date
self.end_date = end_date
# NOTE: Set page size to zero to override limiting query size.
self.subs = BasicSubmission.query(start_date=start_date, end_date=end_date,
submission_type_name=submission_type, page_size=0)
# self.known_controls = list(itertools.chain.from_iterable([sub.controls for sub in self.subs]))
self.samples = list(itertools.chain.from_iterable([sub.get_provisional_controls(controls_only=controls_only) for sub in self.subs]))
# self.samples = flatten_list([sub.get_provisional_controls(controls_only=controls_only) for sub in self.subs])
self.samples = flatten_list([sub.get_provisional_controls(include=include) for sub in self.subs])
self.records = [self.build_record(sample) for sample in self.samples]
self.df = DataFrame.from_records(self.records)
self.sheet_name = "Concentration"

View File

@@ -176,7 +176,7 @@ class InfoWriter(object):
for loc in locations:
sheet = self.xl[loc['sheet']]
try:
logger.debug(f"Writing {v['value']} to row {loc['row']} and column {loc['column']}")
# logger.debug(f"Writing {v['value']} to row {loc['row']} and column {loc['column']}")
sheet.cell(row=loc['row'], column=loc['column'], value=v['value'])
except AttributeError as e:
logger.error(f"Can't write {k} to that cell due to AttributeError: {e}")

View File

@@ -80,26 +80,25 @@ class RSLNamer(object):
submission_type = cls.retrieve_submission_type(filename=filepath.stem.__str__())
return submission_type
def st_from_str(filename: str) -> str:
if filename.startswith("tmp"):
def st_from_str(file_name: str) -> str:
if file_name.startswith("tmp"):
return "Bacterial Culture"
regex = BasicSubmission.regex
m = regex.search(filename)
m = regex.search(file_name)
try:
submission_type = m.lastgroup
sub_type = m.lastgroup
except AttributeError as e:
submission_type = None
sub_type = None
logger.critical(f"No submission type found or submission type found!: {e}")
return submission_type
return sub_type
match filename:
case Path():
submission_type = st_from_path(filepath=filename)
case str():
submission_type = st_from_str(filename=filename)
submission_type = st_from_str(file_name=filename)
case _:
raise TypeError(f"Unsupported filename type: {type(filename)}.")
submission_type = None
try:
check = submission_type is None
except UnboundLocalError:
@@ -137,7 +136,7 @@ class RSLNamer(object):
if m is not None:
try:
parsed_name = m.group().upper().strip(".")
except:
except AttributeError:
parsed_name = None
else:
parsed_name = None

View File

@@ -1,3 +1,7 @@
"""
Collection of pydantic objects to be used in the Gui system.
"""
from __future__ import annotations
import logging
from pydantic import BaseModel, field_validator, Field
@@ -10,6 +14,7 @@ logger = logging.getLogger(f"submissions.{__name__}")
class BaseOmni(BaseModel):
instance_object: Any | None = Field(default=None)
def __repr__(self):
@@ -23,23 +28,23 @@ class BaseOmni(BaseModel):
return cls.class_object.aliases
def check_all_attributes(self, attributes: dict) -> bool:
logger.debug(f"Incoming attributes: {attributes}")
# logger.debug(f"Incoming attributes: {attributes}")
attributes = {k : v for k, v in attributes.items() if k in self.list_searchables.keys()}
for key, value in attributes.items():
try:
logger.debug(f"Check if {value.__class__} is subclass of {BaseOmni}")
# logger.debug(f"Check if {value.__class__} is subclass of {BaseOmni}")
check = issubclass(value.__class__, BaseOmni)
except TypeError as e:
logger.error(f"Couldn't check if {value.__class__} is subclass of {BaseOmni} due to {e}")
check = False
if check:
logger.debug(f"Checking for subclass name.")
# logger.debug(f"Checking for subclass name.")
value = value.name
self_value = self.list_searchables[key]
if value != self_value:
logger.debug(f"Value {key} is False, these are not the same object.")
# logger.debug(f"Value {key} is False, these are not the same object.")
return False
logger.debug("Everything checks out, these are the same object.")
# logger.debug("Everything checks out, these are the same object.")
return True
def __setattr__(self, key, value):
@@ -51,24 +56,24 @@ class BaseOmni(BaseModel):
new_key = class_value.impl.key
except AttributeError:
new_key = None
logger.debug(f"Class value before new key: {class_value.property}")
# logger.debug(f"Class value before new key: {class_value.property}")
if new_key and new_key != key:
class_value = getattr(self.class_object, new_key)
logger.debug(f"Class value after new key: {class_value.property}")
# logger.debug(f"Class value after new key: {class_value.property}")
if isinstance(class_value, InstrumentedAttribute):
logger.debug(f"{key} is an InstrumentedAttribute with class_value.property: {class_value.property}.")
# logger.debug(f"{key} is an InstrumentedAttribute with class_value.property: {class_value.property}.")
match class_value.property:
case ColumnProperty():
logger.debug(f"Setting ColumnProperty to {value}")
# logger.debug(f"Setting ColumnProperty to {value}")
return super().__setattr__(key, value)
case _RelationshipDeclared():
logger.debug(f" {self.__class__.__name__} Setting _RelationshipDeclared for {key} to {value}")
# logger.debug(f" {self.__class__.__name__} Setting _RelationshipDeclared for {key} to {value}")
if class_value.property.uselist:
logger.debug(f"Setting {key} with uselist")
# logger.debug(f"Setting {key} with uselist")
existing = self.__getattribute__(key)
if existing is not None:
# NOTE: Getting some really weird duplicates for OmniSubmissionTypeKitTypeAssociation here.
logger.debug(f"Existing: {existing}, incoming: {value}")
# logger.debug(f"Existing: {existing}, incoming: {value}")
if isinstance(value, list):
if value != existing:
value = existing + value
@@ -82,7 +87,7 @@ class BaseOmni(BaseModel):
if issubclass(value.__class__, self.__class__):
value = value.to_sql()
value = [value]
logger.debug(f"Final value for {key}: {value}")
# logger.debug(f"Final value for {key}: {value}")
return super().__setattr__(key, value)
else:
if isinstance(value, list):
@@ -98,6 +103,7 @@ class BaseOmni(BaseModel):
class OmniSubmissionType(BaseOmni):
class_object: ClassVar[Any] = SubmissionType
name: str = Field(default="", description="property")
@@ -161,6 +167,7 @@ class OmniSubmissionType(BaseOmni):
class OmniReagentRole(BaseOmni):
class_object: ClassVar[Any] = ReagentRole
name: str = Field(default="", description="property")
@@ -197,6 +204,7 @@ class OmniReagentRole(BaseOmni):
class OmniSubmissionTypeKitTypeAssociation(BaseOmni):
class_object: ClassVar[Any] = SubmissionTypeKitTypeAssociation
submissiontype: str | OmniSubmissionType = Field(default="", description="relationship", title="SubmissionType")
@@ -262,7 +270,7 @@ class OmniSubmissionTypeKitTypeAssociation(BaseOmni):
)
def to_sql(self):
logger.debug(f"Self kittype: {self.submissiontype}")
# logger.debug(f"Self kittype: {self.submissiontype}")
if issubclass(self.submissiontype.__class__, BaseOmni):
submissiontype = SubmissionType.query(name=self.submissiontype.name)
else:
@@ -272,7 +280,7 @@ class OmniSubmissionTypeKitTypeAssociation(BaseOmni):
else:
kittype = KitType.query(name=self.kittype)
# logger.debug(f"Self kittype: {self.kittype}")
logger.debug(f"Query or create with {kittype}, {submissiontype}")
# logger.debug(f"Query or create with {kittype}, {submissiontype}")
instance, is_new = self.class_object.query_or_create(kittype=kittype, submissiontype=submissiontype)
instance.mutable_cost_column = self.mutable_cost_column
instance.mutable_cost_sample = self.mutable_cost_sample
@@ -293,6 +301,7 @@ class OmniSubmissionTypeKitTypeAssociation(BaseOmni):
class OmniKitTypeReagentRoleAssociation(BaseOmni):
class_object: ClassVar[Any] = KitTypeReagentRoleAssociation
reagent_role: str | OmniReagentRole = Field(default="", description="relationship", title="ReagentRole")
@@ -363,7 +372,7 @@ class OmniKitTypeReagentRoleAssociation(BaseOmni):
kittype=kittype,
submissiontype=submissiontype
)
logger.debug(f"KitTypeReagentRoleAssociation coming out of query_or_create: {instance.__dict__}\nnew: {new}")
# logger.debug(f"KitTypeReagentRoleAssociation coming out of query_or_create: {instance.__dict__}\nnew: {new}")
if new:
logger.warning(f"This is a new instance: {instance.__dict__}")
try:
@@ -371,10 +380,10 @@ class OmniKitTypeReagentRoleAssociation(BaseOmni):
except AttributeError:
reagent_role = ReagentRole.query(name=self.reagent_role)
instance.reagent_role = reagent_role
logger.debug(f"KTRRAssoc uses: {self.uses}")
# logger.debug(f"KTRRAssoc uses: {self.uses}")
instance.uses = self.uses
instance.required = int(self.required)
logger.debug(f"KitTypeReagentRoleAssociation: {pformat(instance.__dict__)}")
# logger.debug(f"KitTypeReagentRoleAssociation: {pformat(instance.__dict__)}")
return instance
@property
@@ -395,6 +404,7 @@ class OmniKitTypeReagentRoleAssociation(BaseOmni):
class OmniEquipmentRole(BaseOmni):
class_object: ClassVar[Any] = EquipmentRole
name: str = Field(default="", description="property")
@@ -421,6 +431,7 @@ class OmniEquipmentRole(BaseOmni):
class OmniTips(BaseOmni):
class_object: ClassVar[Any] = Tips
name: str = Field(default="", description="property")
@@ -447,6 +458,7 @@ class OmniTips(BaseOmni):
class OmniTipRole(BaseOmni):
class_object: ClassVar[Any] = TipRole
name: str = Field(default="", description="property")
@@ -477,6 +489,7 @@ class OmniTipRole(BaseOmni):
class OmniProcess(BaseOmni):
class_object: ClassVar[Any] = Process
# NOTE: How am I going to figure out relationships without getting into recursion issues?
@@ -540,6 +553,7 @@ class OmniProcess(BaseOmni):
class OmniKitType(BaseOmni):
class_object: ClassVar[Any] = KitType
name: str = Field(default="", description="property")
@@ -565,17 +579,17 @@ class OmniKitType(BaseOmni):
def to_sql(self) -> KitType:
kit, is_new = KitType.query_or_create(name=self.name)
if is_new:
logger.debug(f"New kit made: {kit}")
else:
logger.debug(f"Kit retrieved: {kit}")
# if is_new:
# logger.debug(f"New kit made: {kit}")
# else:
# logger.debug(f"Kit retrieved: {kit}")
new_rr = []
for rr_assoc in self.kit_reagentrole_associations:
new_assoc = rr_assoc.to_sql()
if new_assoc not in new_rr:
logger.debug(f"Adding {new_assoc} to kit_reagentrole_associations")
# logger.debug(f"Adding {new_assoc} to kit_reagentrole_associations")
new_rr.append(new_assoc)
logger.debug(f"Setting kit_reagentrole_associations to {pformat([item.__dict__ for item in new_rr])}")
# logger.debug(f"Setting kit_reagentrole_associations to {pformat([item.__dict__ for item in new_rr])}")
kit.kit_reagentrole_associations = new_rr
new_st = []
for st_assoc in self.kit_submissiontype_associations:
@@ -589,9 +603,9 @@ class OmniKitType(BaseOmni):
if new_process not in new_processes:
new_processes.append(new_process)
kit.processes = new_processes
logger.debug(f"Kit: {pformat(kit.__dict__)}")
for item in kit.kit_reagentrole_associations:
logger.debug(f"KTRRassoc: {item.__dict__}")
# logger.debug(f"Kit: {pformat(kit.__dict__)}")
# for item in kit.kit_reagentrole_associations:
# logger.debug(f"KTRRassoc: {item.__dict__}")
return kit
@@ -601,11 +615,10 @@ class OmniOrganization(BaseOmni):
name: str = Field(default="", description="property")
cost_centre: str = Field(default="", description="property")
# TODO: add in List[OmniContacts]
contact: List[str] | List[OmniContact] = Field(default=[], description="relationship", title="Contact")
def __init__(self, instance_object: Any, **data):
logger.debug(f"Incoming data: {data}")
# logger.debug(f"Incoming data: {data}")
super().__init__(**data)
self.instance_object = instance_object
@@ -642,8 +655,8 @@ class OmniContact(BaseOmni):
def to_sql(self):
contact, is_new = Contact.query_or_create(name=self.name, email=self.email, phone=self.phone)
if is_new:
logger.debug(f"New contact made: {contact}")
else:
logger.debug(f"Contact retrieved: {contact}")
# if is_new:
# logger.debug(f"New contact made: {contact}")
# else:
# logger.debug(f"Contact retrieved: {contact}")
return contact

View File

@@ -22,6 +22,7 @@ logger = logging.getLogger(f"submissions.{__name__}")
class PydReagent(BaseModel):
lot: str | None
role: str | None
expiry: date | datetime | Literal['NA'] | None = Field(default=None, validate_default=True)
@@ -131,7 +132,7 @@ class PydReagent(BaseModel):
if self.model_extra is not None:
self.__dict__.update(self.model_extra)
reagent = Reagent.query(lot=self.lot, name=self.name)
logger.debug(f"Reagent: {reagent}")
# logger.debug(f"Reagent: {reagent}")
if reagent is None:
reagent = Reagent()
for key, value in self.__dict__.items():
@@ -151,6 +152,7 @@ class PydReagent(BaseModel):
class PydSample(BaseModel, extra='allow'):
submitter_id: str
sample_type: str
row: int | List[int] | None
@@ -252,6 +254,7 @@ class PydSample(BaseModel, extra='allow'):
class PydTips(BaseModel):
name: str
lot: str | None = Field(default=None)
role: str
@@ -282,6 +285,7 @@ class PydTips(BaseModel):
class PydEquipment(BaseModel, extra='ignore'):
asset_number: str
name: str
nickname: str | None
@@ -376,6 +380,7 @@ class PydEquipment(BaseModel, extra='ignore'):
class PydSubmission(BaseModel, extra='allow'):
filepath: Path
submission_type: dict | None
submitter_plate_num: dict | None = Field(default=dict(value=None, missing=True), validate_default=True)
@@ -948,7 +953,7 @@ class PydSubmission(BaseModel, extra='allow'):
self.extraction_kit['value'] = extraction_kit['value']
ext_kit = KitType.query(name=self.extraction_kit['value'])
ext_kit_rtypes = [item.to_pydantic() for item in
ext_kit.get_reagents(required=True, submission_type=self.submission_type['value'])]
ext_kit.get_reagents(required_only=True, submission_type=self.submission_type['value'])]
# NOTE: Exclude any reagenttype found in this pyd not expected in kit.
expected_check = [item.role for item in ext_kit_rtypes]
output_reagents = [rt for rt in self.reagents if rt.role in expected_check]
@@ -1014,6 +1019,7 @@ class PydSubmission(BaseModel, extra='allow'):
class PydContact(BaseModel):
name: str
phone: str | None
email: str | None
@@ -1061,6 +1067,7 @@ class PydContact(BaseModel):
class PydOrganization(BaseModel):
name: str
cost_centre: str
contacts: List[PydContact] | None
@@ -1101,6 +1108,7 @@ class PydOrganization(BaseModel):
class PydReagentRole(BaseModel):
name: str
eol_ext: timedelta | int | None
uses: dict | None
@@ -1139,6 +1147,7 @@ class PydReagentRole(BaseModel):
class PydKitType(BaseModel):
name: str
reagent_roles: List[PydReagent] = []
@@ -1160,6 +1169,7 @@ class PydKitType(BaseModel):
class PydEquipmentRole(BaseModel):
name: str
equipment: List[PydEquipment]
processes: List[str] | None
@@ -1187,6 +1197,7 @@ class PydEquipmentRole(BaseModel):
class PydPCRControl(BaseModel):
name: str
subtype: str
target: str
@@ -1210,6 +1221,7 @@ class PydPCRControl(BaseModel):
class PydIridaControl(BaseModel, extra='ignore'):
name: str
contains: list | dict #: unstructured hashes in contains.tsv for each organism
matches: list | dict #: unstructured hashes in matches.tsv for each organism
@@ -1244,6 +1256,7 @@ class PydIridaControl(BaseModel, extra='ignore'):
class PydProcess(BaseModel, extra="allow"):
name: str
version: str = Field(default="1")
submission_types: List[str]
@@ -1297,7 +1310,7 @@ class PydElastic(BaseModel, extra="allow", arbitrary_types_allowed=True):
@report_result
def to_sql(self):
print(self.instance)
# print(self.instance)
fields = [item for item in self.model_extra]
for field in fields:
try:
@@ -1307,11 +1320,11 @@ class PydElastic(BaseModel, extra="allow", arbitrary_types_allowed=True):
continue
match field_type:
case _RelationshipDeclared():
logger.debug(f"{field} is a relationship with {field_type.entity.class_}")
# logger.debug(f"{field} is a relationship with {field_type.entity.class_}")
field_value = field_type.entity.class_.argument.query(name=getattr(self, field))
logger.debug(f"{field} query result: {field_value}")
# logger.debug(f"{field} query result: {field_value}")
case ColumnProperty():
logger.debug(f"{field} is a property.")
# logger.debug(f"{field} is a property.")
field_value = getattr(self, field)
self.instance.__setattr__(field, field_value)
return self.instance

View File

@@ -2,11 +2,11 @@
Contains all operations for creating charts, graphs and visual effects.
'''
from datetime import timedelta, date
from pathlib import Path
from typing import Generator
from PyQt6.QtWidgets import QWidget
import plotly, logging
import pandas as pd, logging
from plotly.graph_objects import Figure
import pandas as pd
from tools import divide_chunks
logger = logging.getLogger(f"submissions.{__name__}")
@@ -123,12 +123,16 @@ class CustomFigure(Figure):
Returns:
str: html string
"""
html = '<html><body>'
html = f'<html><body>'
if self is not None:
html += plotly.offline.plot(self, output_type='div', include_plotlyjs='cdn')
# NOTE: Just cannot get the load-from-string approach to work.
html += self.to_html(include_plotlyjs='cdn', full_html=False)
# html += plotly.offline.plot(self, output_type='div', include_plotlyjs=True)
else:
html += "<h1>No data was retrieved for the given parameters.</h1>"
html += '</body></html>'
with open("test.html", "w", encoding="utf-8") as f:
f.write(html)
return html

View File

@@ -7,12 +7,14 @@ from backend.excel.reports import ConcentrationMaker
from frontend.visualizations.concentrations_chart import ConcentrationsChart
import logging
logger = logging.getLogger(f"submissions.{__name__}")
class Concentrations(InfoPane):
def __init__(self, parent: QWidget):
from .. import CheckableComboBox
super().__init__(parent)
self.save_button = QPushButton("Save Chart", parent=self)
self.save_button.pressed.connect(self.save_png)
@@ -20,12 +22,14 @@ class Concentrations(InfoPane):
self.export_button = QPushButton("Save Data", parent=self)
self.export_button.pressed.connect(self.save_excel)
self.layout.addWidget(self.export_button, 0, 3, 1, 1)
check_label = QLabel("Controls Only")
self.all_box = QCheckBox()
self.all_box.setChecked(True)
self.all_box.checkStateChanged.connect(self.update_data)
self.layout.addWidget(check_label, 1, 0, 1, 1)
self.layout.addWidget(self.all_box, 1, 1, 1, 1)
self.pos_neg = CheckableComboBox(parent=self)
self.pos_neg.model().itemChanged.connect(self.update_data)
self.pos_neg.setEditable(False)
self.pos_neg.addItem("Positive")
self.pos_neg.addItem("Negative")
self.pos_neg.addItem("Samples", start_checked=False)
self.layout.addWidget(QLabel("Control Types"), 1, 0, 1, 1)
self.layout.addWidget(self.pos_neg, 1, 1, 1, 1)
self.fig = None
self.report_object = None
self.update_data()
@@ -37,10 +41,14 @@ class Concentrations(InfoPane):
Returns:
None
"""
include = self.pos_neg.get_checked()
# logger.debug(f"Include: {include}")
super().update_data()
months = self.diff_month(self.start_date, self.end_date)
# logger.debug(f"Box checked: {self.all_box.isChecked()}")
chart_settings = dict(start_date=self.start_date, end_date=self.end_date, controls_only=self.all_box.isChecked())
# chart_settings = dict(start_date=self.start_date, end_date=self.end_date, controls_only=self.all_box.isChecked())
chart_settings = dict(start_date=self.start_date, end_date=self.end_date,
include=include)
self.report_obj = ConcentrationMaker(**chart_settings)
self.fig = ConcentrationsChart(df=self.report_obj.df, settings=chart_settings, modes=[], months=months)
self.webview.setHtml(self.fig.html)

View File

@@ -46,12 +46,15 @@ class CheckableComboBox(QComboBox):
# once there is a checkState set, it is rendered
# here we assume default Unchecked
def addItem(self, item, header: bool = False):
def addItem(self, item, header: bool = False, start_checked: bool = True):
super(CheckableComboBox, self).addItem(item)
item: QStandardItem = self.model().item(self.count() - 1, 0)
if not header:
item.setFlags(Qt.ItemFlag.ItemIsUserCheckable | Qt.ItemFlag.ItemIsEnabled)
if start_checked:
item.setCheckState(Qt.CheckState.Checked)
else:
item.setCheckState(Qt.CheckState.Unchecked)
def itemChecked(self, index):
item = self.model().item(index, 0)
@@ -60,6 +63,10 @@ class CheckableComboBox(QComboBox):
def changed(self):
self.updated.emit()
    def get_checked(self) -> list:
        """
        Collects the display text of every checked item in this combobox.

        Returns:
            list: Checked items' display strings, in index order.
        """
        checked = [self.itemText(i) for i in range(self.count()) if self.itemChecked(i)]
        return checked
class Pagifier(QWidget):

View File

@@ -92,6 +92,7 @@ class SubmissionDetails(QDialog):
Args:
sample (str): Submitter Id of the sample.
"""
logger.debug(f"Sample details.")
if isinstance(sample, str):
sample = BasicSample.query(submitter_id=sample)
base_dict = sample.to_sub_dict(full_data=True)
@@ -102,6 +103,8 @@ class SubmissionDetails(QDialog):
with open(template_path.joinpath("css", "styles.css"), "r") as f:
css = f.read()
html = template.render(sample=base_dict, css=css)
with open(f"{sample.submitter_id}.html", 'w') as f:
f.write(html)
self.webview.setHtml(html)
self.setWindowTitle(f"Sample Details - {sample.submitter_id}")
@@ -114,6 +117,7 @@ class SubmissionDetails(QDialog):
kit (str | KitType): Name of kit.
reagent (str | Reagent): Lot number of the reagent
"""
logger.debug(f"Reagent details.")
if isinstance(reagent, str):
reagent = Reagent.query(lot=reagent)
if isinstance(kit, str):
@@ -164,6 +168,7 @@ class SubmissionDetails(QDialog):
Args:
submission (str | BasicSubmission): Submission of interest.
"""
logger.debug(f"Submission details.")
if isinstance(submission, str):
submission = BasicSubmission.query(rsl_plate_num=submission)
self.rsl_plate_num = submission.rsl_plate_num

View File

@@ -71,11 +71,8 @@
{% endif %}
{% endblock %}
{% block signing_button %}
{% if permission and not sub['signed_by'] %}
<button type="button" id="sign_btn">Sign Off</button>
{% endif %}
<button type="button" id="sign_btn" {% if permission and not sub['signed_by'] %}{% else %}hidden{% endif %}>Sign Off</button>
{% endblock %}
<br>
<br>
<br>
@@ -84,13 +81,11 @@
{% block script %}
{{ super() }}
document.getElementById("sign_btn").addEventListener("click", function(){
backend.sign_off("{{ sub['plate_number'] }}");
});
var sampleSelection = document.getElementsByClassName('sample');
for(let i = 0; i < sampleSelection.length; i++) {
sampleSelection[i].addEventListener("click", function() {
console.log(sampleSelection[i].id);
backend.sample_details(sampleSelection[i].id);
})
}
@@ -99,10 +94,15 @@
for(let i = 0; i < reagentSelection.length; i++) {
reagentSelection[i].addEventListener("click", function() {
console.log(reagentSelection[i].id);
backend.reagent_details(reagentSelection[i].id, "{{ sub['extraction_kit'] }}");
})
}
document.getElementById("sign_btn").addEventListener("click", function(){
backend.sign_off("{{ sub['plate_number'] }}");
});
{% endblock %}
</script>
</html>

View File

@@ -3,6 +3,7 @@ Contains miscellaneous functions used by both frontend and backend.
'''
from __future__ import annotations
import builtins, importlib, time, logging, re, yaml, sys, os, stat, platform, getpass, json, numpy as np, pandas as pd
import itertools
from datetime import date, datetime, timedelta
from json import JSONDecodeError
from threading import Thread
@@ -254,7 +255,7 @@ def timer(func):
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
logger.debug(f"Finished {func.__name__}() in {run_time:.4f} secs")
print(f"Finished {func.__name__}() in {run_time:.4f} secs")
return value
return wrapper
@@ -896,33 +897,8 @@ def check_dictionary_inclusion_equality(listo: List[dict] | dict, dicto: dict) -
raise TypeError(f"Unsupported variable: {type(listo)}")
def rectify_query_date(input_date: datetime | date | int | str, eod: bool = False) -> str:
    """
    Converts input into a datetime string ("%Y-%m-%d %H:%M:%S") for querying purposes.

    Args:
        input_date (datetime | date | int | str): Date to normalize. A bare date gets
            min or max time attached; an int is treated as a spreadsheet-style serial
            day count (1900 epoch); anything else is parsed with dateutil.
        eod (bool, optional): Whether to use max time to indicate end of day. Defaults to False.

    Returns:
        str: properly formatted datetime string
    """
    match input_date:
        case datetime():
            output_date = input_date.strftime("%Y-%m-%d %H:%M:%S")
        case date():
            # NOTE: A date has no time component; pin it to start (or end) of day.
            if eod:
                addition_time = datetime.max.time()
            else:
                addition_time = datetime.min.time()
            output_date = datetime.combine(input_date, addition_time)
            output_date = output_date.strftime("%Y-%m-%d %H:%M:%S")
        case int():
            # NOTE: Presumably an Excel-style serial date (1900 epoch; the -2 offsets
            # Excel's day-numbering quirks) — TODO confirm against callers.
            output_date = datetime.fromordinal(
                datetime(1900, 1, 1).toordinal() + input_date - 2).date().strftime("%Y-%m-%d %H:%M:%S")
        case _:
            # NOTE: Fall back to dateutil's free-form string parsing.
            output_date = parse(input_date).strftime("%Y-%m-%d %H:%M:%S")
    return output_date
def flatten_list(input_list: list) -> list:
    """
    Flattens a list of iterables into a single list, one level deep.

    Args:
        input_list (list): A list whose elements are themselves iterable.

    Returns:
        list: All items from the inner iterables, in order.
    """
    return [element for inner in input_list for element in inner]
class classproperty(property):