From cddb947ec80a61082d8608a288a6d660449f2a3e Mon Sep 17 00:00:00 2001 From: Landon Wark Date: Tue, 5 Dec 2023 10:20:46 -0600 Subject: [PATCH] Pre-sample/control connect --- CHANGELOG.md | 10 + TODO.md | 11 +- alembic.ini | 2 +- ...ca468_add_templates_to_submission_types.py | 32 + src/submissions/__init__.py | 2 +- src/submissions/__main__.py | 7 +- src/submissions/backend/db/models/__init__.py | 44 +- src/submissions/backend/db/models/controls.py | 62 +- src/submissions/backend/db/models/kits.py | 394 +++++----- .../backend/db/models/organizations.py | 62 +- .../backend/db/models/submissions.py | 730 ++++++++++++------ src/submissions/backend/excel/parser.py | 100 +-- src/submissions/backend/excel/reports.py | 25 +- .../backend/validators/__init__.py | 13 +- src/submissions/backend/validators/pydant.py | 226 ++++-- .../frontend/visualizations/barcode.py | 11 + .../frontend/visualizations/control_charts.py | 141 ++-- .../frontend/visualizations/plate_map.py | 22 +- src/submissions/frontend/widgets/app.py | 171 +--- .../frontend/widgets/controls_chart.py | 21 +- src/submissions/frontend/widgets/functions.py | 4 - .../frontend/widgets/kit_creator.py | 11 +- src/submissions/frontend/widgets/misc.py | 53 +- src/submissions/frontend/widgets/pop_ups.py | 1 - .../frontend/widgets/submission_table.py | 173 +++-- .../frontend/widgets/submission_widget.py | 16 +- .../templates/submission_details.html | 2 +- src/submissions/templates/summary_report.html | 4 +- src/submissions/tools.py | 49 +- 29 files changed, 1357 insertions(+), 1042 deletions(-) create mode 100644 alembic/versions/7e7b6eeca468_add_templates_to_submission_types.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 4235d68..f2ce4be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## 202312.01 + +- Backups will now create an regenerated xlsx file. +- Report generator now does sums automatically. + +## 202311.04 + +- Added xlsx template files to the database. +- Switched session hand-off to sqlalchemy to abstract parent class. + ## 202311.03 - Added in tabular log parser. diff --git a/TODO.md b/TODO.md index 6bd2480..47da8d1 100644 --- a/TODO.md +++ b/TODO.md @@ -1,5 +1,9 @@ -- [ ] Buuuuuuhh. Split polymorphic objects into different tables... and rebuild DB.... FFFFF - - https://stackoverflow.com/questions/16910782/sqlalchemy-nested-inheritance-polymorphic-relationships +- [x] Clean up DB objects after failed test fix. +- [x] Fix tests. +- [ ] Fix pydant.PydSample.handle_duplicate_samples? +- [ ] See if the number of queries in BasicSubmission functions (and others) can be trimmed down. +- [x] Document code + - Done Submissions up to BasicSample - [x] Create a result object to facilitate returning function results. - [x] Refactor main_window_functions into as many objects (forms, etc.) as possible to clean it up. - [x] Integrate 'Construct First Strand' into the Artic import. @@ -10,7 +14,6 @@ - [x] Move lookup functions into class methods of db objects? - Not sure if will work for associations. - [x] Update artic submission type database entry to add more technicians. -- [ ] Document code - [x] Rewrite tests... again. - [x] Have InfoItem change status self.missing to True if value changed. - [x] Make the kit verifier make more sense. @@ -26,7 +29,7 @@ - [x] Drag and drop files into submission form area? - [ ] Get info for controls into their sample hitpicks. - [x] Move submission-type specific parser functions into class methods in their respective models. -- [ ] Improve function results reporting. 
+- [x] Improve function results reporting. - Maybe make it a list until it gets to the reporter? - [x] Increase robustness of form parsers by adding custom procedures for each. - [x] Rerun Kit integrity if extraction kit changed in the form. diff --git a/alembic.ini b/alembic.ini index 309811a..edfb698 100644 --- a/alembic.ini +++ b/alembic.ini @@ -56,7 +56,7 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne # output_encoding = utf-8 ; sqlalchemy.url = sqlite:///L:\Robotics Laboratory Support\Submissions\submissions.db -sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\Submissions_app_backups\DB_backups\submissions-new.db +; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\Archives\Submissions_app_backups\DB_backups\submissions-new.db ; sqlalchemy.url = sqlite:///C:\Users\lwark\Documents\python\submissions\tests\test_assets\submissions-test.db diff --git a/alembic/versions/7e7b6eeca468_add_templates_to_submission_types.py b/alembic/versions/7e7b6eeca468_add_templates_to_submission_types.py new file mode 100644 index 0000000..88feb4f --- /dev/null +++ b/alembic/versions/7e7b6eeca468_add_templates_to_submission_types.py @@ -0,0 +1,32 @@ +"""add templates to submission types + +Revision ID: 7e7b6eeca468 +Revises: +Create Date: 2023-11-23 08:07:51.103392 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '7e7b6eeca468' +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('_submission_types', schema=None) as batch_op: + batch_op.add_column(sa.Column('template_file', sa.BLOB(), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('_submission_types', schema=None) as batch_op: + batch_op.drop_column('template_file') + + # ### end Alembic commands ### diff --git a/src/submissions/__init__.py b/src/submissions/__init__.py index 78398b8..6f9e001 100644 --- a/src/submissions/__init__.py +++ b/src/submissions/__init__.py @@ -4,7 +4,7 @@ from pathlib import Path # Version of the realpython-reader package __project__ = "submissions" -__version__ = "202311.3b" +__version__ = "202312.1b" __author__ = {"name":"Landon Wark", "email":"Landon.Wark@phac-aspc.gc.ca"} __copyright__ = "2022-2023, Government of Canada" diff --git a/src/submissions/__main__.py b/src/submissions/__main__.py index 0e40426..5e9f5b8 100644 --- a/src/submissions/__main__.py +++ b/src/submissions/__main__.py @@ -1,17 +1,14 @@ import sys import os # environment variable must be set to enable qtwebengine in network path -# if getattr(sys, 'frozen', False): -# os.environ['QTWEBENGINE_DISABLE_SANDBOX'] = "1" -from tools import get_config, setup_logger, check_if_app +from tools import ctx, setup_logger, check_if_app if check_if_app(): os.environ['QTWEBENGINE_DISABLE_SANDBOX'] = "1" # setup custom logger logger = setup_logger(verbosity=3) # create settings object -ctx = get_config(None) + from PyQt6.QtWidgets import QApplication -# from frontend import App from frontend.widgets.app import App if __name__ == '__main__': diff --git a/src/submissions/backend/db/models/__init__.py b/src/submissions/backend/db/models/__init__.py index c13e75f..40a9935 100644 --- a/src/submissions/backend/db/models/__init__.py +++ b/src/submissions/backend/db/models/__init__.py @@ -1,8 +1,50 @@ ''' Contains all models for sqlalchemy ''' +import sys +from sqlalchemy.orm import DeclarativeMeta, declarative_base +from sqlalchemy.ext.declarative import declared_attr +if 'pytest' in sys.modules: + from pathlib import Path + sys.path.append(Path(__file__).parents[4].absolute().joinpath("tests").__str__()) + +Base: DeclarativeMeta = declarative_base() + +class BaseClass(Base): + """ + Abstract class to pass ctx values to all SQLAlchemy objects. + + Args: + Base (DeclarativeMeta): Declarative base for metadata. + """ + __abstract__ = True + + __table_args__ = {'extend_existing': True} + + @declared_attr + def __database_session__(cls): + if not 'pytest' in sys.modules: + from tools import ctx + else: + from test_settings import ctx + return ctx.database_session + + @declared_attr + def __directory_path__(cls): + if not 'pytest' in sys.modules: + from tools import ctx + else: + from test_settings import ctx + return ctx.directory_path + + @declared_attr + def __backup_path__(cls): + if not 'pytest' in sys.modules: + from tools import ctx + else: + from test_settings import ctx + return ctx.backup_path -from tools import Base from .controls import * # import order must go: orgs, kit, subs due to circular import issues from .organizations import * diff --git a/src/submissions/backend/db/models/controls.py b/src/submissions/backend/db/models/controls.py index 8f29fce..4754952 100644 --- a/src/submissions/backend/db/models/controls.py +++ b/src/submissions/backend/db/models/controls.py @@ -7,7 +7,7 @@ from sqlalchemy.orm import relationship, Query import logging from operator import itemgetter import json -from . import Base +from . 
import BaseClass from tools import setup_lookup, query_return from datetime import date, datetime from typing import List @@ -15,12 +15,11 @@ from dateutil.parser import parse logger = logging.getLogger(f"submissions.{__name__}") -class ControlType(Base): +class ControlType(BaseClass): """ Base class of a control archetype. """ __tablename__ = '_control_types' - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key name = Column(String(255), unique=True) #: controltype name (e.g. MCS) @@ -29,7 +28,7 @@ class ControlType(Base): @classmethod @setup_lookup - def query(cls, + def query(cls, name:str=None, limit:int=0 ) -> ControlType|List[ControlType]: @@ -37,14 +36,13 @@ class ControlType(Base): Lookup control archetypes in the database Args: - ctx (Settings): Settings object passed down from gui. name (str, optional): Control type name (limits results to 1). Defaults to None. limit (int, optional): Maximum number of results to return. Defaults to 0. Returns: models.ControlType|List[models.ControlType]: ControlType(s) of interest. """ - query = cls.metadata.session.query(cls) + query = cls.__database_session__.query(cls) match name: case str(): query = query.filter(cls.name==name) @@ -52,14 +50,13 @@ class ControlType(Base): case _: pass return query_return(query=query, limit=limit) - -class Control(Base): + +class Control(BaseClass): """ Base class of a control sample. """ __tablename__ = '_control_samples' - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key parent_id = Column(String, ForeignKey("_control_types.id", name="fk_control_parent_id")) #: primary key of control type @@ -114,10 +111,9 @@ class Control(Base): def convert_by_mode(self, mode:str) -> list[dict]: """ - split control object into analysis types for controls graphs + split this instance into analysis types for controls graphs Args: - control (models.Control): control to be parsed into list mode (str): analysis type, 'contains', etc Returns: @@ -168,6 +164,21 @@ class Control(Base): data = {} return data + @classmethod + def get_modes(cls) -> List[str]: + """ + Get all control modes from database + + Returns: + List[str]: List of control mode names. + """ + try: + cols = [item.name for item in list(cls.__table__.columns) if isinstance(item.type, JSON)] + except AttributeError as e: + logger.error(f"Failed to get available modes from db: {e}") + cols = [] + return cols + @classmethod @setup_lookup def query(cls, @@ -190,15 +201,14 @@ class Control(Base): Returns: models.Control|List[models.Control]: Control object of interest. 
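+
+        Example (illustrative; the values shown are hypothetical):
+
+            # most recent control of the 'MCS' archetype submitted in a date window
+            ctrl = Control.query(control_type="MCS", start_date="2023-01-01", end_date="2023-06-30", limit=1)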
""" - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) # by control type match control_type: case ControlType(): - logger.debug(f"Looking up control by control type: {control_type}") - # query = query.join(models.ControlType).filter(models.ControlType==control_type) + # logger.debug(f"Looking up control by control type: {control_type}") query = query.filter(cls.controltype==control_type) case str(): - logger.debug(f"Looking up control by control type: {control_type}") + # logger.debug(f"Looking up control by control type: {control_type}") query = query.join(ControlType).filter(ControlType.name==control_type) case _: pass @@ -224,7 +234,7 @@ class Control(Base): end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date().strftime("%Y-%m-%d") case _: end_date = parse(end_date).strftime("%Y-%m-%d") - logger.debug(f"Looking up BasicSubmissions from start date: {start_date} and end date: {end_date}") + # logger.debug(f"Looking up BasicSubmissions from start date: {start_date} and end date: {end_date}") query = query.filter(cls.submitted_date.between(start_date, end_date)) match control_name: case str(): @@ -233,23 +243,3 @@ class Control(Base): case _: pass return query_return(query=query, limit=limit) - - @classmethod - def get_modes(cls): - """ - Get all control modes from database - - Args: - ctx (Settings): Settings object passed down from gui. - - Returns: - List[str]: List of control mode names. - """ - rel = cls.metadata.session.query(cls).first() - try: - cols = [item.name for item in list(rel.__table__.columns) if isinstance(item.type, JSON)] - except AttributeError as e: - logger.debug(f"Failed to get available modes from db: {e}") - cols = [] - return cols - diff --git a/src/submissions/backend/db/models/kits.py b/src/submissions/backend/db/models/kits.py index 6f0b572..6cb0b75 100644 --- a/src/submissions/backend/db/models/kits.py +++ b/src/submissions/backend/db/models/kits.py @@ -2,14 +2,15 @@ All kit and reagent related models ''' from __future__ import annotations -from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT, func, BLOB +from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT, BLOB from sqlalchemy.orm import relationship, validates, Query from sqlalchemy.ext.associationproxy import association_proxy from datetime import date import logging -from tools import check_authorization, setup_lookup, query_return, Report, Result +from tools import check_authorization, setup_lookup, query_return, Report, Result, Settings from typing import List -from . import Base, Organization +from pandas import ExcelFile +from . 
import Base, BaseClass, Organization logger = logging.getLogger(f'submissions.{__name__}') @@ -21,12 +22,12 @@ reagenttypes_reagents = Table( extend_existing = True ) -class KitType(Base): +class KitType(BaseClass): """ Base of kits used in submission processing """ __tablename__ = "_kits" - __table_args__ = {'extend_existing': True} + # __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key name = Column(String(64), unique=True) #: name of kit @@ -54,16 +55,7 @@ class KitType(Base): def __repr__(self) -> str: return f"" - def __str__(self) -> str: - """ - a string representing this object - - Returns: - str: a string representing this object's name - """ - return self.name - - def get_reagents(self, required:bool=False, submission_type:str|None=None) -> list: + def get_reagents(self, required:bool=False, submission_type:str|SubmissionType|None=None) -> list: """ Return ReagentTypes linked to kit through KitTypeReagentTypeAssociation. @@ -74,10 +66,13 @@ class KitType(Base): Returns: list: List of reagent types """ - if submission_type != None: - relevant_associations = [item for item in self.kit_reagenttype_associations if submission_type in item.uses.keys()] - else: - relevant_associations = [item for item in self.kit_reagenttype_associations] + match submission_type: + case SubmissionType(): + relevant_associations = [item for item in self.kit_reagenttype_associations if submission_type.name in item.uses.keys()] + case str(): + relevant_associations = [item for item in self.kit_reagenttype_associations if submission_type in item.uses.keys()] + case _: + relevant_associations = [item for item in self.kit_reagenttype_associations] if required: return [item.reagent_type for item in relevant_associations if item.required == 1] else: @@ -109,14 +104,9 @@ class KitType(Base): map['info'] = {} return map - @check_authorization - def save(self): - self.metadata.session.add(self) - self.metadata.session.commit() - @classmethod @setup_lookup - def query(cls, + def query(cls, name:str=None, used_for:str|SubmissionType|None=None, id:int|None=None, @@ -126,7 +116,6 @@ class KitType(Base): Lookup a list of or single KitType. Args: - ctx (Settings): Settings object passed down from gui name (str, optional): Name of desired kit (returns single instance). Defaults to None. used_for (str | models.Submissiontype | None, optional): Submission type the kit is used for. Defaults to None. id (int | None, optional): Kit id in the database. Defaults to None. @@ -135,10 +124,10 @@ class KitType(Base): Returns: models.KitType|List[models.KitType]: KitType(s) of interest. 
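+
+        Example (illustrative; the kit name is hypothetical):
+
+            kit = KitType.query(name="Example Extraction Kit")
+            kits = KitType.query(used_for="Wastewater")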
""" - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) match used_for: case str(): - logger.debug(f"Looking up kit type by use: {used_for}") + # logger.debug(f"Looking up kit type by use: {used_for}") query = query.filter(cls.used_for.any(name=used_for)) case SubmissionType(): query = query.filter(cls.used_for.contains(used_for)) @@ -146,30 +135,37 @@ class KitType(Base): pass match name: case str(): - logger.debug(f"Looking up kit type by name: {name}") + # logger.debug(f"Looking up kit type by name: {name}") query = query.filter(cls.name==name) limit = 1 case _: pass match id: case int(): - logger.debug(f"Looking up kit type by id: {id}") + # logger.debug(f"Looking up kit type by id: {id}") query = query.filter(cls.id==id) limit = 1 case str(): - logger.debug(f"Looking up kit type by id: {id}") + # logger.debug(f"Looking up kit type by id: {id}") query = query.filter(cls.id==int(id)) limit = 1 case _: pass return query_return(query=query, limit=limit) + + @check_authorization + def save(self, ctx:Settings): + """ + Add this instance to database and commit + """ + self.__database_session__.add(self) + self.__database_session__.commit() -class ReagentType(Base): +class ReagentType(BaseClass): """ Base of reagent type abstract """ __tablename__ = "_reagent_types" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key name = Column(String(64)) #: name of reagent type @@ -187,21 +183,21 @@ class ReagentType(Base): # creator function: https://stackoverflow.com/questions/11091491/keyerror-when-adding-objects-to-sqlalchemy-association-object/11116291#11116291 kit_types = association_proxy("reagenttype_kit_associations", "kit_type", creator=lambda kit: KitTypeReagentTypeAssociation(kit_type=kit)) - def __str__(self) -> str: - """ - string representing this object + # def __str__(self) -> str: + # """ + # string representing this object - Returns: - str: string representing this object's name - """ - return self.name + # Returns: + # str: string representing this object's name + # """ + # return self.name def __repr__(self): - return f"ReagentType({self.name})" + return f"" @classmethod @setup_lookup - def query(cls, + def query(cls, name: str|None=None, kit_type: KitType|str|None=None, reagent: Reagent|str|None=None, @@ -211,14 +207,18 @@ class ReagentType(Base): Lookup reagent types in the database. Args: - ctx (Settings): Settings object passed down from gui. name (str | None, optional): Reagent type name. Defaults to None. + kit_type (KitType | str | None, optional): Kit the type of interest belongs to. Defaults to None. + reagent (Reagent | str | None, optional): Concrete instance of the type of interest. Defaults to None. limit (int, optional): maxmimum number of results to return (0 = all). Defaults to 0. + Raises: + ValueError: Raised if only kit_type or reagent, not both, given. + Returns: - models.ReagentType|List[models.ReagentType]: ReagentType or list of ReagentTypes matching filter. - """ - query: Query = cls.metadata.session.query(cls) + ReagentType|List[ReagentType]: ReagentType or list of ReagentTypes matching filter. 
+ """ + query: Query = cls.__database_session__.query(cls) if (kit_type != None and reagent == None) or (reagent != None and kit_type == None): raise ValueError("Cannot filter without both reagent and kit type.") elif kit_type == None and reagent == None: @@ -235,9 +235,8 @@ class ReagentType(Base): case _: pass assert reagent.type != [] - logger.debug(f"Looking up reagent type for {type(kit_type)} {kit_type} and {type(reagent)} {reagent}") - logger.debug(f"Kit reagent types: {kit_type.reagent_types}") - # logger.debug(f"Reagent reagent types: {reagent._sa_instance_state}") + # logger.debug(f"Looking up reagent type for {type(kit_type)} {kit_type} and {type(reagent)} {reagent}") + # logger.debug(f"Kit reagent types: {kit_type.reagent_types}") result = list(set(kit_type.reagent_types).intersection(reagent.type)) logger.debug(f"Result: {result}") try: @@ -246,34 +245,33 @@ class ReagentType(Base): return None match name: case str(): - logger.debug(f"Looking up reagent type by name: {name}") + # logger.debug(f"Looking up reagent type by name: {name}") query = query.filter(cls.name==name) limit = 1 case _: pass return query_return(query=query, limit=limit) - -class KitTypeReagentTypeAssociation(Base): + +class KitTypeReagentTypeAssociation(BaseClass): """ table containing reagenttype/kittype associations DOC: https://docs.sqlalchemy.org/en/14/orm/extensions/associationproxy.html """ __tablename__ = "_reagenttypes_kittypes" - __table_args__ = {'extend_existing': True} - reagent_types_id = Column(INTEGER, ForeignKey("_reagent_types.id"), primary_key=True) - kits_id = Column(INTEGER, ForeignKey("_kits.id"), primary_key=True) - uses = Column(JSON) - required = Column(INTEGER) + reagent_types_id = Column(INTEGER, ForeignKey("_reagent_types.id"), primary_key=True) #: id of associated reagent type + kits_id = Column(INTEGER, ForeignKey("_kits.id"), primary_key=True) #: id of associated reagent type + uses = Column(JSON) #: map to location on excel sheets of different submission types + required = Column(INTEGER) #: whether the reagent type is required for the kit (Boolean 1 or 0) last_used = Column(String(32)) #: last used lot number of this type of reagent - kit_type = relationship(KitType, back_populates="kit_reagenttype_associations") + kit_type = relationship(KitType, back_populates="kit_reagenttype_associations") #: relationship to associated kit # reference to the "ReagentType" object - reagent_type = relationship(ReagentType, back_populates="reagenttype_kit_associations") + reagent_type = relationship(ReagentType, back_populates="reagenttype_kit_associations") #: relationship to associated reagent type def __init__(self, kit_type=None, reagent_type=None, uses=None, required=1): - logger.debug(f"Parameters: Kit={kit_type}, RT={reagent_type}, Uses={uses}, Required={required}") + # logger.debug(f"Parameters: Kit={kit_type}, RT={reagent_type}, Uses={uses}, Required={required}") self.kit_type = kit_type self.reagent_type = reagent_type self.uses = uses @@ -284,12 +282,38 @@ class KitTypeReagentTypeAssociation(Base): @validates('required') def validate_age(self, key, value): + """ + Ensures only 1 & 0 used in 'required' + + Args: + key (str): name of attribute + value (_type_): value of attribute + + Raises: + ValueError: Raised if bad value given + + Returns: + _type_: value + """ if not 0 <= value < 2: raise ValueError(f'Invalid required value {value}. 
Must be 0 or 1.') return value @validates('reagenttype') def validate_reagenttype(self, key, value): + """ + Ensures reagenttype is an actual ReagentType + + Args: + key (str)): name of attribute + value (_type_): value of attribute + + Raises: + ValueError: raised if reagenttype is not a ReagentType + + Returns: + _type_: ReagentType + """ if not isinstance(value, ReagentType): raise ValueError(f'{value} is not a reagenttype') return value @@ -297,15 +321,14 @@ class KitTypeReagentTypeAssociation(Base): @classmethod @setup_lookup def query(cls, - kit_type:KitType|str|None, - reagent_type:ReagentType|str|None, + kit_type:KitType|str|None=None, + reagent_type:ReagentType|str|None=None, limit:int=0 ) -> KitTypeReagentTypeAssociation|List[KitTypeReagentTypeAssociation]: """ Lookup junction of ReagentType and KitType Args: - ctx (Settings): Settings object passed down from gui. kit_type (models.KitType | str | None): KitType of interest. reagent_type (models.ReagentType | str | None): ReagentType of interest. limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0. @@ -313,7 +336,7 @@ class KitTypeReagentTypeAssociation(Base): Returns: models.KitTypeReagentTypeAssociation|List[models.KitTypeReagentTypeAssociation]: Junction of interest. """ - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) match kit_type: case KitType(): query = query.filter(cls.kit_type==kit_type) @@ -333,17 +356,22 @@ class KitTypeReagentTypeAssociation(Base): return query_return(query=query, limit=limit) def save(self) -> Report: + """ + Adds this instance to the database and commits. + + Returns: + Report: Result of save action + """ report = Report() - self.metadata.session.add(self) - self.metadata.session.commit() + self.__database_session__.add(self) + self.__database_session__.commit() return report -class Reagent(Base): +class Reagent(BaseClass): """ Concrete reagent instance """ __tablename__ = "_reagents" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key type = relationship("ReagentType", back_populates="instances", secondary=reagenttypes_reagents) #: joined parent reagent type @@ -358,16 +386,7 @@ class Reagent(Base): return f"" else: return f"" - - def __str__(self) -> str: - """ - string representing this object - - Returns: - str: string representing this object's type and lot number - """ - return str(self.lot) - + def to_sub_dict(self, extraction_kit:KitType=None) -> dict: """ dictionary containing values necessary for gui @@ -376,7 +395,7 @@ class Reagent(Base): extraction_kit (KitType, optional): KitType to use to get reagent type. Defaults to None. Returns: - dict: _description_ + dict: representation of the reagent's attributes """ if extraction_kit != None: # Get the intersection of this reagent's ReagentType and all ReagentTypes in KitType @@ -388,73 +407,59 @@ class Reagent(Base): else: reagent_role = self.type[0] try: - rtype = reagent_role.name.replace("_", " ").title() + rtype = reagent_role.name.replace("_", " ") except AttributeError: rtype = "Unknown" # Calculate expiry with EOL from ReagentType try: place_holder = self.expiry + reagent_role.eol_ext - except TypeError as e: + except (TypeError, AttributeError) as e: place_holder = date.today() logger.debug(f"We got a type error setting {self.lot} expiry: {e}. 
setting to today for testing") - except AttributeError as e: - place_holder = date.today() - logger.debug(f"We got an attribute error setting {self.lot} expiry: {e}. Setting to today for testing") - return { - "type": rtype, - "lot": self.lot, - "expiry": place_holder.strftime("%Y-%m-%d") - } + return dict( + name=self.name, + type=rtype, + lot=self.lot, + expiry=place_holder.strftime("%Y-%m-%d") + ) - def to_reagent_dict(self, extraction_kit:KitType|str=None) -> dict: + def update_last_used(self, kit:KitType) -> Report: """ - Returns basic reagent dictionary. + Updates last used reagent lot for ReagentType/KitType Args: - extraction_kit (KitType, optional): KitType to use to get reagent type. Defaults to None. + kit (KitType): Kit this instance is used in. Returns: - dict: Basic reagent dictionary of 'type', 'lot', 'expiry' + Report: Result of operation """ - if extraction_kit != None: - # Get the intersection of this reagent's ReagentType and all ReagentTypes in KitType - try: - reagent_role = list(set(self.type).intersection(extraction_kit.reagent_types))[0] - # Most will be able to fall back to first ReagentType in itself because most will only have 1. - except: - reagent_role = self.type[0] - else: - reagent_role = self.type[0] - try: - rtype = reagent_role.name - except AttributeError: - rtype = "Unknown" - try: - expiry = self.expiry.strftime("%Y-%m-%d") - except: - expiry = date.today() - return { - "name":self.name, - "type": rtype, - "lot": self.lot, - "expiry": self.expiry.strftime("%Y-%m-%d") - } - - def save(self): - self.metadata.session.add(self) - self.metadata.session.commit() - + report = Report() + logger.debug(f"Attempting update of reagent type at intersection of ({self}), ({kit})") + rt = ReagentType.query(kit_type=kit, reagent=self, limit=1) + if rt != None: + logger.debug(f"got reagenttype {rt}") + assoc = KitTypeReagentTypeAssociation.query(kit_type=kit, reagent_type=rt) + if assoc != None: + if assoc.last_used != self.lot: + logger.debug(f"Updating {assoc} last used to {self.lot}") + assoc.last_used = self.lot + result = assoc.save() + report.add_result(result) + return report + report.add_result(Result(msg=f"Updating last used {rt} was not performed.", status="Information")) + return report + @classmethod @setup_lookup - def query(cls, reagent_type:str|ReagentType|None=None, - lot_number:str|None=None, - limit:int=0 - ) -> Reagent|List[Reagent]: + def query(cls, + reagent_type:str|ReagentType|None=None, + lot_number:str|None=None, + limit:int=0 + ) -> Reagent|List[Reagent]: """ Lookup a list of reagents from the database. Args: - ctx (Settings): Settings object passed down from gui reagent_type (str | models.ReagentType | None, optional): Reagent type. Defaults to None. lot_number (str | None, optional): Reagent lot number. Defaults to None. limit (int, optional): limit of results returned. Defaults to 0. @@ -462,13 +467,14 @@ class Reagent(Base): Returns: models.Reagent | List[models.Reagent]: reagent or list of reagents matching filter. 
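+
+        Example (illustrative; the lot number is hypothetical):
+
+            reagent = Reagent.query(lot_number="EXAMPLE123", limit=1)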
""" - query: Query = cls.metadata.session.query(cls) + # super().query(session) + query: Query = cls.__database_session__.query(cls) match reagent_type: case str(): - logger.debug(f"Looking up reagents by reagent type: {reagent_type}") - query = query.join(cls.type, aliased=True).filter(ReagentType.name==reagent_type) + # logger.debug(f"Looking up reagents by reagent type: {reagent_type}") + query = query.join(cls.type).filter(ReagentType.name==reagent_type) case ReagentType(): - logger.debug(f"Looking up reagents by reagent type: {reagent_type}") + # logger.debug(f"Looking up reagents by reagent type: {reagent_type}") query = query.filter(cls.type.contains(reagent_type)) case _: pass @@ -482,42 +488,33 @@ class Reagent(Base): pass return query_return(query=query, limit=limit) - def update_last_used(self, kit:KitType): - report = Report() - logger.debug(f"Attempting update of reagent type at intersection of ({self}), ({kit})") - rt = ReagentType.query(kit_type=kit, reagent=self, limit=1) - if rt != None: - logger.debug(f"got reagenttype {rt}") - assoc = KitTypeReagentTypeAssociation.query(kit_type=kit, reagent_type=rt) - if assoc != None: - if assoc.last_used != self.lot: - logger.debug(f"Updating {assoc} last used to {self.lot}") - assoc.last_used = self.lot - result = assoc.save() - return(report.add_result(result)) - return report.add_result(Result(msg=f"Updating last used {rt} was not performed.", status="Information")) - -class Discount(Base): + def save(self): + """ + Add this instance to the database and commit + """ + self.__database_session__.add(self) + self.__database_session__.commit() + +class Discount(BaseClass): """ Relationship table for client labs for certain kits. """ __tablename__ = "_discounts" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key kit = relationship("KitType") #: joined parent reagent type - kit_id = Column(INTEGER, ForeignKey("_kits.id", ondelete='SET NULL', name="fk_kit_type_id")) + kit_id = Column(INTEGER, ForeignKey("_kits.id", ondelete='SET NULL', name="fk_kit_type_id")) #: id of joined kit client = relationship("Organization") #: joined client lab - client_id = Column(INTEGER, ForeignKey("_organizations.id", ondelete='SET NULL', name="fk_org_id")) - name = Column(String(128)) - amount = Column(FLOAT(2)) + client_id = Column(INTEGER, ForeignKey("_organizations.id", ondelete='SET NULL', name="fk_org_id")) #: id of joined client + name = Column(String(128)) #: Short description + amount = Column(FLOAT(2)) #: Dollar amount of discount def __repr__(self) -> str: return f"" @classmethod @setup_lookup - def query(cls, + def query(cls, organization:Organization|str|int|None=None, kit_type:KitType|str|int|None=None, ) -> Discount|List[Discount]: @@ -525,7 +522,6 @@ class Discount(Base): Lookup discount objects (union of kit and organization) Args: - ctx (Settings): Settings object passed down from the gui. organization (models.Organization | str | int): Organization receiving discount. kit_type (models.KitType | str | int): Kit discount received on. @@ -536,60 +532,68 @@ class Discount(Base): Returns: models.Discount|List[models.Discount]: Discount(s) of interest. 
""" - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) match organization: case Organization(): - logger.debug(f"Looking up discount with organization: {organization}") + # logger.debug(f"Looking up discount with organization: {organization}") query = query.filter(cls.client==Organization) case str(): - logger.debug(f"Looking up discount with organization: {organization}") + # logger.debug(f"Looking up discount with organization: {organization}") query = query.join(Organization).filter(Organization.name==organization) case int(): - logger.debug(f"Looking up discount with organization id: {organization}") + # logger.debug(f"Looking up discount with organization id: {organization}") query = query.join(Organization).filter(Organization.id==organization) case _: # raise ValueError(f"Invalid value for organization: {organization}") pass match kit_type: case KitType(): - logger.debug(f"Looking up discount with kit type: {kit_type}") + # logger.debug(f"Looking up discount with kit type: {kit_type}") query = query.filter(cls.kit==kit_type) case str(): - logger.debug(f"Looking up discount with kit type: {kit_type}") + # logger.debug(f"Looking up discount with kit type: {kit_type}") query = query.join(KitType).filter(KitType.name==kit_type) case int(): - logger.debug(f"Looking up discount with kit type id: {organization}") + # logger.debug(f"Looking up discount with kit type id: {organization}") query = query.join(KitType).filter(KitType.id==kit_type) case _: # raise ValueError(f"Invalid value for kit type: {kit_type}") pass return query.all() - -class SubmissionType(Base): + +class SubmissionType(BaseClass): """ Abstract of types of submissions. """ __tablename__ = "_submission_types" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key name = Column(String(128), unique=True) #: name of submission type info_map = Column(JSON) #: Where basic information is found in the excel workbook corresponding to this type. - instances = relationship("BasicSubmission", backref="submission_type") + instances = relationship("BasicSubmission", backref="submission_type") #: Concrete instances of this type. # regex = Column(String(512)) - # template_file = Column(BLOB) + template_file = Column(BLOB) #: Blank form for this type stored as binary. submissiontype_kit_associations = relationship( "SubmissionTypeKitTypeAssociation", back_populates="submission_type", cascade="all, delete-orphan", - ) + ) #: Association of kittypes - kit_types = association_proxy("submissiontype_kit_associations", "kit_type") + kit_types = association_proxy("submissiontype_kit_associations", "kit_type") #: Proxy of kittype association def __repr__(self) -> str: return f"" + def get_template_file_sheets(self) -> List[str]: + """ + Gets names of sheet in the stored blank form. + + Returns: + List[str]: List of sheet names + """ + return ExcelFile(self.template_file).sheet_names + @classmethod @setup_lookup def query(cls, @@ -603,15 +607,16 @@ class SubmissionType(Base): Args: ctx (Settings): Settings object passed down from gui name (str | None, optional): Name of submission type. Defaults to None. + key (str | None, optional): A key present in the info-map to lookup. Defaults to None. limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0. Returns: models.SubmissionType|List[models.SubmissionType]: SubmissionType(s) of interest. 
""" - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) match name: case str(): - logger.debug(f"Looking up submission type by name: {name}") + # logger.debug(f"Looking up submission type by name: {name}") query = query.filter(cls.name==name) limit = 1 case _: @@ -624,27 +629,28 @@ class SubmissionType(Base): return query_return(query=query, limit=limit) def save(self): - self.metadata.session.add(self) - self.metadata.session.commit() - return None + """ + Adds this instances to the database and commits. + """ + self.__database_session__.add(self) + self.__database_session__.commit() -class SubmissionTypeKitTypeAssociation(Base): +class SubmissionTypeKitTypeAssociation(BaseClass): """ Abstract of relationship between kits and their submission type. """ __tablename__ = "_submissiontypes_kittypes" - __table_args__ = {'extend_existing': True} - submission_types_id = Column(INTEGER, ForeignKey("_submission_types.id"), primary_key=True) - kits_id = Column(INTEGER, ForeignKey("_kits.id"), primary_key=True) + submission_types_id = Column(INTEGER, ForeignKey("_submission_types.id"), primary_key=True) #: id of joined submission type + kits_id = Column(INTEGER, ForeignKey("_kits.id"), primary_key=True) #: id of joined kit mutable_cost_column = Column(FLOAT(2)) #: dollar amount per 96 well plate that can change with number of columns (reagents, tips, etc) mutable_cost_sample = Column(FLOAT(2)) #: dollar amount that can change with number of samples (reagents, tips, etc) constant_cost = Column(FLOAT(2)) #: dollar amount per plate that will remain constant (plates, man hours, etc) - kit_type = relationship(KitType, back_populates="kit_submissiontype_associations") + kit_type = relationship(KitType, back_populates="kit_submissiontype_associations") #: joined kittype # reference to the "SubmissionType" object - submission_type = relationship(SubmissionType, back_populates="submissiontype_kit_associations") + submission_type = relationship(SubmissionType, back_populates="submissiontype_kit_associations") #: joined submission type def __init__(self, kit_type=None, submission_type=None): self.kit_type = kit_type @@ -661,32 +667,42 @@ class SubmissionTypeKitTypeAssociation(Base): @classmethod @setup_lookup - def query(cls, - submission_type:SubmissionType|str|int|None=None, + def query(cls, + submission_type:SubmissionType|str|int|None=None, kit_type:KitType|str|int|None=None, limit:int=0 - ): - query: Query = cls.metadata.session.query(cls) + ) -> SubmissionTypeKitTypeAssociation|List[SubmissionTypeKitTypeAssociation]: + """ + Lookup SubmissionTypeKitTypeAssociations of interest. + + Args: + submission_type (SubmissionType | str | int | None, optional): Identifier of submission type. Defaults to None. + kit_type (KitType | str | int | None, optional): Identifier of kit type. Defaults to None. + limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0. 
+ + Returns: + SubmissionTypeKitTypeAssociation|List[SubmissionTypeKitTypeAssociation]: SubmissionTypeKitTypeAssociation(s) of interest + """ + query: Query = cls.__database_session__.query(cls) match submission_type: case SubmissionType(): - logger.debug(f"Looking up {cls.__name__} by SubmissionType {submission_type}") + # logger.debug(f"Looking up {cls.__name__} by SubmissionType {submission_type}") query = query.filter(cls.submission_type==submission_type) case str(): - logger.debug(f"Looking up {cls.__name__} by name {submission_type}") + # logger.debug(f"Looking up {cls.__name__} by name {submission_type}") query = query.join(SubmissionType).filter(SubmissionType.name==submission_type) case int(): - logger.debug(f"Looking up {cls.__name__} by id {submission_type}") + # logger.debug(f"Looking up {cls.__name__} by id {submission_type}") query = query.join(SubmissionType).filter(SubmissionType.id==submission_type) match kit_type: case KitType(): - logger.debug(f"Looking up {cls.__name__} by KitType {kit_type}") + # logger.debug(f"Looking up {cls.__name__} by KitType {kit_type}") query = query.filter(cls.kit_type==kit_type) case str(): - logger.debug(f"Looking up {cls.__name__} by name {kit_type}") + # logger.debug(f"Looking up {cls.__name__} by name {kit_type}") query = query.join(KitType).filter(KitType.name==kit_type) case int(): - logger.debug(f"Looking up {cls.__name__} by id {kit_type}") + # logger.debug(f"Looking up {cls.__name__} by id {kit_type}") query = query.join(KitType).filter(KitType.id==kit_type) limit = query.count() return query_return(query=query, limit=limit) - diff --git a/src/submissions/backend/db/models/organizations.py b/src/submissions/backend/db/models/organizations.py index 70f1ff4..ab6c1e3 100644 --- a/src/submissions/backend/db/models/organizations.py +++ b/src/submissions/backend/db/models/organizations.py @@ -4,8 +4,8 @@ All client organization related models. from __future__ import annotations from sqlalchemy import Column, String, INTEGER, ForeignKey, Table from sqlalchemy.orm import relationship, Query -from . import Base -from tools import check_authorization, setup_lookup, query_return +from . import Base, BaseClass +from tools import check_authorization, setup_lookup, query_return, Settings from typing import List import logging @@ -21,12 +21,11 @@ orgs_contacts = Table( extend_existing = True ) -class Organization(Base): +class Organization(BaseClass): """ Base of organization """ __tablename__ = "_organizations" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key name = Column(String(64)) #: organization name @@ -34,29 +33,15 @@ class Organization(Base): cost_centre = Column(String()) #: cost centre used by org for payment contacts = relationship("Contact", back_populates="organization", secondary=orgs_contacts) #: contacts involved with this org - def __str__(self) -> str: - """ - String representing organization - - Returns: - str: string representing organization name - """ - return self.name.replace("_", " ").title() - def __repr__(self) -> str: return f"" - - @check_authorization - def save(self, ctx): - ctx.database_session.add(self) - ctx.database_session.commit() def set_attribute(self, name:str, value): setattr(self, name, value) @classmethod @setup_lookup - def query(cls, + def query(cls, name:str|None=None, limit:int=0, ) -> Organization|List[Organization]: @@ -68,24 +53,34 @@ class Organization(Base): limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0. 
Returns: - Organization|List[Organization]: _description_ + Organization|List[Organization]: """ - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) match name: case str(): - logger.debug(f"Looking up organization with name: {name}") + # logger.debug(f"Looking up organization with name: {name}") query = query.filter(cls.name==name) limit = 1 case _: pass return query_return(query=query, limit=limit) + + @check_authorization + def save(self, ctx:Settings): + """ + Adds this instance to the database and commits -class Contact(Base): + Args: + ctx (Settings): Settings object passed down from GUI. Necessary to check authorization + """ + ctx.database_session.add(self) + ctx.database_session.commit() + +class Contact(BaseClass): """ Base of Contact """ __tablename__ = "_contacts" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key name = Column(String(64)) #: contact name @@ -98,7 +93,7 @@ class Contact(Base): @classmethod @setup_lookup - def query(cls, + def query(cls, name:str|None=None, email:str|None=None, phone:str|None=None, @@ -109,32 +104,35 @@ class Contact(Base): Args: name (str | None, optional): Name of the contact. Defaults to None. + email (str | None, optional): Email of the contact. Defaults to None. + phone (str | None, optional): Phone number of the contact. Defaults to None. limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0. Returns: - Contact|List[Contact]: _description_ - """ - query: Query = cls.metadata.session.query(cls) + Contact|List[Contact]: Contact(s) of interest. + """ + # super().query(session) + query: Query = cls.__database_session__.query(cls) match name: case str(): - logger.debug(f"Looking up contact with name: {name}") + # logger.debug(f"Looking up contact with name: {name}") query = query.filter(cls.name==name) limit = 1 case _: pass match email: case str(): - logger.debug(f"Looking up contact with email: {name}") + # logger.debug(f"Looking up contact with email: {name}") query = query.filter(cls.email==email) limit = 1 case _: pass match phone: case str(): - logger.debug(f"Looking up contact with phone: {name}") + # logger.debug(f"Looking up contact with phone: {name}") query = query.filter(cls.phone==phone) limit = 1 case _: pass return query_return(query=query, limit=limit) - + \ No newline at end of file diff --git a/src/submissions/backend/db/models/submissions.py b/src/submissions/backend/db/models/submissions.py index 7920f63..68fd172 100644 --- a/src/submissions/backend/db/models/submissions.py +++ b/src/submissions/backend/db/models/submissions.py @@ -3,29 +3,24 @@ Models for the main submission types. ''' from __future__ import annotations from getpass import getuser -import math +import math, json, logging, uuid, tempfile, re, yaml from pprint import pformat from . import Reagent, SubmissionType, KitType, Organization from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, Table, JSON, FLOAT, case -from sqlalchemy.orm import relationship, validates, Query, declared_attr -import logging -import json +from sqlalchemy.orm import relationship, validates, Query from json.decoder import JSONDecodeError -from math import ceil from sqlalchemy.ext.associationproxy import association_proxy -import uuid -import re import pandas as pd from openpyxl import Workbook -from . import Base +from . 
import Base, BaseClass from tools import check_not_nan, row_map, query_return, setup_lookup from datetime import datetime, date from typing import List from dateutil.parser import parse from dateutil.parser._parser import ParserError -import yaml from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError, StatementError from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError +from pathlib import Path logger = logging.getLogger(f"submissions.{__name__}") @@ -38,15 +33,11 @@ reagents_submissions = Table( extend_existing = True ) -class BasicSubmission(Base): +class BasicSubmission(BaseClass): """ Concrete of basic submission which polymorphs into BacterialCulture and Wastewater """ - # @declared_attr - # def __tablename__(cls): - # return cls.__name__.lower() __tablename__ = "_submissions" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key rsl_plate_num = Column(String(32), unique=True, nullable=False) #: RSL name (e.g. RSL-22-0012) @@ -56,8 +47,8 @@ class BasicSubmission(Base): submitting_lab_id = Column(INTEGER, ForeignKey("_organizations.id", ondelete="SET NULL", name="fk_BS_sublab_id")) #: client lab id from _organizations sample_count = Column(INTEGER) #: Number of samples in the submission extraction_kit = relationship("KitType", back_populates="submissions") #: The extraction kit used - extraction_kit_id = Column(INTEGER, ForeignKey("_kits.id", ondelete="SET NULL", name="fk_BS_extkit_id")) - submission_type_name = Column(String, ForeignKey("_submission_types.name", ondelete="SET NULL", name="fk_BS_subtype_name")) + extraction_kit_id = Column(INTEGER, ForeignKey("_kits.id", ondelete="SET NULL", name="fk_BS_extkit_id")) #: id of joined extraction kit + submission_type_name = Column(String, ForeignKey("_submission_types.name", ondelete="SET NULL", name="fk_BS_subtype_name")) #: name of joined submission type technician = Column(String(64)) #: initials of processing tech(s) # Move this into custom types? reagents = relationship("Reagent", back_populates="submissions", secondary=reagents_submissions) #: relationship to reagents @@ -73,10 +64,10 @@ class BasicSubmission(Base): "SubmissionSampleAssociation", back_populates="submission", cascade="all, delete-orphan", - ) + ) #: Relation to SubmissionSampleAssociation # association proxy of "user_keyword_associations" collection # to "keyword" attribute - samples = association_proxy("submission_sample_associations", "sample") + samples = association_proxy("submission_sample_associations", "sample") #: Association proxy to SubmissionSampleAssociation.samples # Allows for subclassing into ex. BacterialCulture, Wastewater, etc. __mapper_args__ = { @@ -129,7 +120,7 @@ class BasicSubmission(Base): ext_info = None except JSONDecodeError as e: ext_info = None - logger.debug(f"Json error in {self.rsl_plate_num}: {e}") + logger.error(f"Json error in {self.rsl_plate_num}: {e}") # Updated 2023-09 to use the extraction kit to pull reagents. 
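+        # Full data resolves reagents through the extraction kit so each lot is reported
+        # under the role it plays in this kit, and samples are drawn from the
+        # submission/sample association records rather than the bare sample objects.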
if full_data: try: @@ -137,14 +128,15 @@ class BasicSubmission(Base): except Exception as e: logger.error(f"We got an error retrieving reagents: {e}") reagents = None - samples = [item.sample.to_sub_dict(submission_rsl=self.rsl_plate_num) for item in self.submission_sample_associations] + # samples = [item.sample.to_sub_dict(submission_rsl=self.rsl_plate_num) for item in self.submission_sample_associations] + samples = [item.to_sub_dict() for item in self.submission_sample_associations] else: reagents = None samples = None try: comments = self.comment - except: - logger.error(self.comment) + except Exception as e: + logger.error(f"Error setting comment: {self.comment}") comments = None output = { "id": self.id, @@ -160,8 +152,8 @@ class BasicSubmission(Base): "Cost": self.run_cost, "reagents": reagents, "samples": samples, - "ext_info": ext_info, - "comments": comments + "extraction_info": ext_info, + "comment": comments } return output @@ -210,7 +202,7 @@ class BasicSubmission(Base): logger.error(f"Column count error: {e}") # Get kit associated with this submission assoc = [item for item in self.extraction_kit.kit_submissiontype_associations if item.submission_type == self.submission_type][0] - logger.debug(f"Came up with association: {assoc}") + # logger.debug(f"Came up with association: {assoc}") # If every individual cost is 0 this is probably an old plate. if all(item == 0.0 for item in [assoc.constant_cost, assoc.mutable_cost_column, assoc.mutable_cost_sample]): try: @@ -230,9 +222,9 @@ class BasicSubmission(Base): Returns: int: Number of unique columns. """ - logger.debug(f"Here's the samples: {self.samples}") + # logger.debug(f"Here's the samples: {self.samples}") columns = set([assoc.column for assoc in self.submission_sample_associations]) - logger.debug(f"Here are the columns for {self.rsl_plate_num}: {columns}") + # logger.debug(f"Here are the columns for {self.rsl_plate_num}: {columns}") return len(columns) def hitpick_plate(self, plate_number:int|None=None) -> list: @@ -271,6 +263,7 @@ class BasicSubmission(Base): Returns: pd.DataFrame: updated plate map. """ + logger.info(f"Calling {cls.__mapper_args__['polymorphic_identity']} plate mapper.") return plate_map @classmethod @@ -285,7 +278,7 @@ class BasicSubmission(Base): Returns: dict: Updated sample dictionary """ - logger.debug(f"Calling {cls.__name__} info parser.") + logger.info(f"Calling {cls.__mapper_args__['polymorphic_identity']} info parser.") return input_dict @classmethod @@ -299,41 +292,86 @@ class BasicSubmission(Base): Returns: dict: Updated sample dictionary """ - # logger.debug(f"Called {cls.__name__} sample parser") + logger.info(f"Called {cls.__mapper_args__['polymorphic_identity']} sample parser") return input_dict @classmethod def finalize_parse(cls, input_dict:dict, xl:pd.ExcelFile|None=None, info_map:dict|None=None, plate_map:dict|None=None) -> dict: + """ + Performs any final custom parsing of the excel file. + + Args: + input_dict (dict): Parser product up to this point. + xl (pd.ExcelFile | None, optional): Excel submission form. Defaults to None. + info_map (dict | None, optional): Map of information locations from SubmissionType. Defaults to None. + plate_map (dict | None, optional): Constructed plate map of samples. Defaults to None. + + Returns: + dict: Updated parser product. 
+ """ + logger.info(f"Called {cls.__mapper_args__['polymorphic_identity']} finalizer") return input_dict @classmethod - def custom_autofill(cls, input_excel:Workbook) -> Workbook: + def custom_autofill(cls, input_excel:Workbook, info:dict|None=None, backup:bool=False) -> Workbook: """ Adds custom autofill methods for submission Args: - input_excel (Workbook): input workbook + input_excel (Workbook): initial workbook. + info (dict | None, optional): dictionary of additional info. Defaults to None. + backup (bool, optional): Whether this is part of a backup operation. Defaults to False. Returns: - Workbook: updated workbook - """ + Workbook: Updated workbook + """ + logger.info(f"Hello from {cls.__mapper_args__['polymorphic_identity']} autofill") return input_excel @classmethod def enforce_name(cls, instr:str, data:dict|None=None) -> str: - logger.debug(f"Hello from {cls.__mapper_args__['polymorphic_identity']} Enforcer!") - logger.debug(f"Attempting enforcement on {instr} using data: {pformat(data)}") + """ + Custom naming method for this class. + + Args: + instr (str): Initial name. + data (dict | None, optional): Additional parameters for name. Defaults to None. + + Returns: + str: Updated name. + """ + logger.info(f"Hello from {cls.__mapper_args__['polymorphic_identity']} Enforcer!") + # logger.debug(f"Attempting enforcement on {instr} using data: {pformat(data)}") # sys.exit() return instr @classmethod - def construct_regex(cls): + def construct_regex(cls) -> re.Pattern: + """ + Constructs catchall regex. + + Returns: + re.Pattern: Regular expression pattern to discriminate between submission types. + """ rstring = rf'{"|".join([item.get_regex() for item in cls.__subclasses__()])}' regex = re.compile(rstring, flags = re.IGNORECASE | re.VERBOSE) return regex @classmethod def find_subclasses(cls, attrs:dict|None=None, submission_type:str|SubmissionType|None=None): + """ + Retrieves subclasses of this class matching patterned + + Args: + attrs (dict | None, optional): Attributes to look for. Defaults to None. + submission_type (str | SubmissionType | None, optional): Submission type. Defaults to None. + + Raises: + AttributeError: Raised if attr given, but not found. + + Returns: + _type_: Subclass of interest. + """ match submission_type: case str(): return cls.find_polymorphic_subclass(submission_type) @@ -351,11 +389,20 @@ class BasicSubmission(Base): raise AttributeError(f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs)}") else: model = cls - logger.debug(f"Using model: {model}") + logger.info(f"Recruiting model: {model}") return model @classmethod - def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None): + def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None): + """ + Find subclass based on polymorphic identity. + + Args: + polymorphic_identity (str | None, optional): String representing polymorphic identity. Defaults to None. + + Returns: + _type_: Subclass of interest. + """ if isinstance(polymorphic_identity, dict): polymorphic_identity = polymorphic_identity['value'] if polymorphic_identity != None: @@ -368,31 +415,175 @@ class BasicSubmission(Base): @classmethod def parse_pcr(cls, xl:pd.DataFrame, rsl_number:str) -> list: + """ + Perform custom parsing of pcr info. 
+ + Args: + xl (pd.DataFrame): pcr info form + rsl_number (str): rsl plate num of interest + + Returns: + list: _description_ + """ logger.debug(f"Hello from {cls.__mapper_args__['polymorphic_identity']} PCR parser!") return [] + @classmethod + def filename_template(cls) -> str: + """ + Constructs template for filename of this class. + + Returns: + str: filename template in jinja friendly format. + """ + return "{{ rsl_plate_num }}" + + def set_attribute(self, key:str, value): + """ + Performs custom attribute setting based on values. + + Args: + key (str): name of attribute + value (_type_): value of attribute + """ + match key: + case "extraction_kit": + # logger.debug(f"Looking up kit {value}") + # field_value = lookup_kit_types(ctx=self.ctx, name=value) + field_value = KitType.query(name=value) + # logger.debug(f"Got {field_value} for kit {value}") + case "submitting_lab": + # logger.debug(f"Looking up organization: {value}") + # field_value = lookup_organizations(ctx=self.ctx, name=value) + field_value = Organization.query(name=value) + # logger.debug(f"Got {field_value} for organization {value}") + case "submitter_plate_num": + # logger.debug(f"Submitter plate id: {value}") + field_value = value + case "samples": + # instance = construct_samples(ctx=ctx, instance=instance, samples=value) + for sample in value: + # logger.debug(f"Parsing {sample} to sql.") + sample, _ = sample.toSQL(submission=self) + # instance.samples.append(sample) + return + case "reagents": + field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for reagent in value] + case "submission_type": + # field_value = lookup_submission_type(ctx=self.ctx, name=value) + field_value = SubmissionType.query(name=value) + case "sample_count": + if value == None: + field_value = len(self.samples) + else: + field_value = value + case "ctx" | "csv" | "filepath": + return + case _: + field_value = value + # insert into field + try: + setattr(self, key, field_value) + except AttributeError: + logger.error(f"Could not set {self} attribute {key} to {value}") + + def update_subsampassoc(self, sample:BasicSample, input_dict:dict): + """ + Update a joined submission sample association. + + Args: + sample (BasicSample): Associated sample. + input_dict (dict): values to be updated + + Returns: + _type_: _description_ + """ + # assoc = SubmissionSampleAssociation.query(submission=self, sample=sample, limit=1) + assoc = [item.sample for item in self.submission_sample_associations if item.sample==sample][0] + for k,v in input_dict.items(): + try: + setattr(assoc, k, v) + except AttributeError: + logger.error(f"Can't set {k} to {v}") + # result = store_object(ctx=ctx, object=assoc) + result = assoc.save() + return result + + def to_pydantic(self): + """ + Converts this instance into a PydSubmission + + Returns: + PydSubmission: converted object. 
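+
+        Example (mirrors the workflow used in backup()):
+
+            pyd = submission.to_pydantic()
+            wb = pyd.autofill_excel()
+            wb = pyd.autofill_samples(wb)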
+ """ + from backend.validators import PydSubmission, PydSample, PydReagent + dicto = self.to_dict(full_data=True) + # dicto['filepath'] = Path(tempfile.TemporaryFile().name) + new_dict = {} + for key, value in dicto.items(): + match key: + case "reagents": + new_dict[key] = [PydReagent(**reagent) for reagent in value] + case "samples": + new_dict[key] = [PydSample(**sample) for sample in dicto['samples']] + case "Plate Number": + new_dict['rsl_plate_num'] = dict(value=value, missing=True) + case "Submitter Plate Number": + new_dict['submitter_plate_num'] = dict(value=value, missing=True) + case _: + logger.debug(f"Setting dict {key} to {value}") + new_dict[key.lower().replace(" ", "_")] = dict(value=value, missing=True) + # new_dict[key.lower().replace(" ", "_")]['value'] = value + # new_dict[key.lower().replace(" ", "_")]['missing'] = True + new_dict['filepath'] = Path(tempfile.TemporaryFile().name) + logger.debug(f"Dictionary coming into PydSubmission: {pformat(new_dict)}") + # sys.exit() + return PydSubmission(**new_dict) + + def backup(self, fname:Path): + """ + Exports xlsx and yml info files for this instance. + + Args: + fname (Path): Filename of xlsx file. + """ + backup = self.to_dict(full_data=True) + try: + with open(self.__backup_path__.joinpath(fname.with_suffix(".yml")), "w") as f: + yaml.dump(backup, f) + except KeyError as e: + logger.error(f"Problem saving yml backup file: {e}") + pyd = self.to_pydantic() + wb = pyd.autofill_excel() + wb = pyd.autofill_samples(wb) + wb.save(filename=fname.with_suffix(".xlsx")) + def save(self, original:bool=True): + """ + Adds this instance to database and commits. + + Args: + original (bool, optional): Is this the first save. Defaults to True. + """ if original: self.uploaded_by = getuser() - self.metadata.session.add(self) - self.metadata.session.commit() - return None - - def update(self): - pass + self.__database_session__.add(self) + self.__database_session__.commit() def delete(self): - backup = self.to_dict() + """ + Performs backup and deletes this instance from database. + + Raises: + e: Raised in something goes wrong. 
+ """ + fname = self.__backup_path__.joinpath(f"{self.rsl_plate_num}-backup({date.today().strftime('%Y%m%d')})") + self.backup(fname=fname) + self.__database_session__.delete(self) try: - with open(self.metadata.backup_path.joinpath(f"{self.rsl_plate_num}-backup({date.today().strftime('%Y%m%d')}).yml"), "w") as f: - yaml.dump(backup, f) - except KeyError: - pass - self.metadata.session.delete(self) - try: - self.metadata.session.commit() + self.__database_session__.commit() except (SQLIntegrityError, SQLOperationalError, AlcIntegrityError, AlcOperationalError) as e: - self.metadata.session.rollback() + self.__database_session__.rollback() raise e @classmethod @@ -423,7 +614,7 @@ class BasicSubmission(Base): Returns: models.BasicSubmission | List[models.BasicSubmission]: Submission(s) of interest """ - logger.debug(f"kwargs coming into query: {kwargs}") + # logger.debug(f"kwargs coming into query: {kwargs}") # NOTE: if you go back to using 'model' change the appropriate cls to model in the query filters if submission_type == None: model = cls.find_subclasses(attrs=kwargs) @@ -432,7 +623,7 @@ class BasicSubmission(Base): model = cls.find_subclasses(submission_type=submission_type.name) else: model = cls.find_subclasses(submission_type=submission_type) - query: Query = cls.metadata.session.query(model) + query: Query = cls.__database_session__.query(model) if start_date != None and end_date == None: logger.warning(f"Start date with no end date, using today.") end_date = date.today() @@ -454,17 +645,15 @@ class BasicSubmission(Base): end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date().strftime("%Y-%m-%d") case _: end_date = parse(end_date).strftime("%Y-%m-%d") - logger.debug(f"Looking up BasicSubmissions from start date: {start_date} and end date: {end_date}") + # logger.debug(f"Looking up BasicSubmissions from start date: {start_date} and end date: {end_date}") query = query.filter(cls.submitted_date.between(start_date, end_date)) # by reagent (for some reason) match reagent: case str(): - logger.debug(f"Looking up BasicSubmission with reagent: {reagent}") - # reagent = Reagent.query(lot_number=reagent) - # query = query.join(reagents_submissions).filter(reagents_submissions.c.reagent_id==reagent.id) + # logger.debug(f"Looking up BasicSubmission with reagent: {reagent}") query = query.join(cls.reagents).filter(Reagent.lot==reagent) case Reagent(): - logger.debug(f"Looking up BasicSubmission with reagent: {reagent}") + # logger.debug(f"Looking up BasicSubmission with reagent: {reagent}") query = query.join(reagents_submissions).filter(reagents_submissions.c.reagent_id==reagent.id) case _: pass @@ -472,18 +661,18 @@ class BasicSubmission(Base): match rsl_number: case str(): query = query.filter(cls.rsl_plate_num==rsl_number) - logger.debug(f"At this point the query gets: {query.all()}") + # logger.debug(f"At this point the query gets: {query.all()}") limit = 1 case _: pass # by id (returns only a single value) match id: case int(): - logger.debug(f"Looking up BasicSubmission with id: {id}") + # logger.debug(f"Looking up BasicSubmission with id: {id}") query = query.filter(cls.id==id) limit = 1 case str(): - logger.debug(f"Looking up BasicSubmission with id: {id}") + # logger.debug(f"Looking up BasicSubmission with id: {id}") query = query.filter(cls.id==int(id)) limit = 1 case _: @@ -507,8 +696,8 @@ class BasicSubmission(Base): submission_type (str | SubmissionType | None, optional): Submission type to be created. Defaults to None. 
Raises: - ValueError: _description_ - ValueError: _description_ + ValueError: Raised if no kwargs passed. + ValueError: Raised if disallowed key is passed. Returns: cls: _description_ @@ -522,7 +711,7 @@ class BasicSubmission(Base): if key in disallowed: raise ValueError(f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects. Use .query() instead.") instance = cls.query(submission_type=submission_type, limit=1, **kwargs) - logger.debug(f"Retrieved instance: {instance}") + # logger.debug(f"Retrieved instance: {instance}") if instance == None: used_class = cls.find_subclasses(attrs=kwargs, submission_type=submission_type) instance = used_class(**kwargs) @@ -539,64 +728,7 @@ class BasicSubmission(Base): code = 1 msg = "This submission already exists.\nWould you like to overwrite?" return instance, code, msg - - @classmethod - def filename_template(cls): - return "{{ rsl_plate_num }}" - - def set_attribute(self, key, value): - match key: - case "extraction_kit": - logger.debug(f"Looking up kit {value}") - # field_value = lookup_kit_types(ctx=self.ctx, name=value) - field_value = KitType.query(name=value) - logger.debug(f"Got {field_value} for kit {value}") - case "submitting_lab": - logger.debug(f"Looking up organization: {value}") - # field_value = lookup_organizations(ctx=self.ctx, name=value) - field_value = Organization.query(name=value) - logger.debug(f"Got {field_value} for organization {value}") - case "submitter_plate_num": - logger.debug(f"Submitter plate id: {value}") - field_value = value - case "samples": - # instance = construct_samples(ctx=ctx, instance=instance, samples=value) - for sample in value: - # logger.debug(f"Parsing {sample} to sql.") - sample, _ = sample.toSQL(submission=self) - # instance.samples.append(sample) - return - case "reagents": - field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for reagent in value] - case "submission_type": - # field_value = lookup_submission_type(ctx=self.ctx, name=value) - field_value = SubmissionType.query(name=value) - case "sample_count": - if value == None: - field_value = len(self.samples) - else: - field_value = value - case "ctx" | "csv" | "filepath": - return - case _: - field_value = value - # insert into field - try: - setattr(self, key, field_value) - except AttributeError: - logger.error(f"Could not set {self} attribute {key} to {value}") - - def update_subsampassoc(self, sample:BasicSample, input_dict:dict): - assoc = SubmissionSampleAssociation.query(submission=self, sample=sample, limit=1) - for k,v in input_dict.items(): - try: - setattr(assoc, k, v) - except AttributeError: - logger.error(f"Can't set {k} to {v}") - # result = store_object(ctx=ctx, object=assoc) - result = assoc.save() - return result - + # Below are the custom submission types class BacterialCulture(BasicSubmission): @@ -643,7 +775,7 @@ class BacterialCulture(BasicSubmission): return plate_map @classmethod - def custom_autofill(cls, input_excel: Workbook) -> Workbook: + def custom_autofill(cls, input_excel: Workbook, info:dict|None=None, backup:bool=False) -> Workbook: """ Stupid stopgap solution to there being an issue with the Bacterial Culture plate map. Extends parent. 
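For context, an override along these lines is all the hook has to do: take the workbook, write whatever extra cells its plate-map template expects, and hand the workbook back. This is a minimal sketch with a made-up class name and cell coordinates, not the project's actual implementation:

    from openpyxl import Workbook

    class ExampleCulture:
        # Hypothetical stand-in for a BasicSubmission subclass.
        @classmethod
        def custom_autofill(cls, input_excel: Workbook, info: dict | None = None, backup: bool = False) -> Workbook:
            # Write into a fixed cell on the first sheet; a real override would
            # target the cells its submission template actually uses.
            ws = input_excel[input_excel.sheetnames[0]]
            ws.cell(row=1, column=1, value="autofilled")
            return input_excel

    wb = ExampleCulture.custom_autofill(Workbook())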
@@ -664,6 +796,9 @@ class BacterialCulture(BasicSubmission): @classmethod def enforce_name(cls, instr:str, data:dict|None=None) -> str: + """ + Extends parent + """ outstr = super().enforce_name(instr=instr, data=data) def construct(data:dict|None=None) -> str: """ @@ -672,25 +807,23 @@ class BacterialCulture(BasicSubmission): Returns: str: new RSL number """ - logger.debug(f"Attempting to construct RSL number from scratch...") - # directory = Path(self.ctx['directory_path']).joinpath("Bacteria") - # directory = Path(ctx.directory_path).joinpath("Bacteria") - directory = cls.metadata.directory_path.joinpath("Bacteria") + # logger.debug(f"Attempting to construct RSL number from scratch...") + directory = cls.__directory_path__.joinpath("Bacteria") year = str(datetime.now().year)[-2:] if directory.exists(): logger.debug(f"Year: {year}") relevant_rsls = [] all_xlsx = [item.stem for item in directory.rglob("*.xlsx") if bool(re.search(r"RSL-\d{2}-\d{4}", item.stem)) and year in item.stem[4:6]] - logger.debug(f"All rsls: {all_xlsx}") + # logger.debug(f"All rsls: {all_xlsx}") for item in all_xlsx: try: relevant_rsls.append(re.match(r"RSL-\d{2}-\d{4}", item).group(0)) except Exception as e: logger.error(f"Regex error: {e}") continue - logger.debug(f"Initial xlsx: {relevant_rsls}") + # logger.debug(f"Initial xlsx: {relevant_rsls}") max_number = max([int(item[-4:]) for item in relevant_rsls]) - logger.debug(f"The largest sample number is: {max_number}") + # logger.debug(f"The largest sample number is: {max_number}") return f"RSL-{year}-{str(max_number+1).zfill(4)}" else: # raise FileNotFoundError(f"Unable to locate the directory: {directory.__str__()}") @@ -704,11 +837,20 @@ class BacterialCulture(BasicSubmission): return re.sub(r"RSL-(\d{2})(\d{4})", r"RSL-\1-\2", outstr, flags=re.IGNORECASE) @classmethod - def get_regex(cls): + def get_regex(cls) -> str: + """ + Retrieves string for regex construction. 
+ + Returns: + str: string for regex construction + """ return "(?PRSL-?\\d{2}-?\\d{4})" @classmethod def filename_template(cls): + """ + extends parent + """ template = super().filename_template() template += "_{{ submitting_lab }}_{{ submitter_plate_num }}" return template @@ -793,6 +935,9 @@ class Wastewater(BasicSubmission): @classmethod def enforce_name(cls, instr:str, data:dict|None=None) -> str: + """ + Extends parent + """ outstr = super().enforce_name(instr=instr, data=data) def construct(data:dict|None=None): if "submitted_date" in data.keys(): @@ -817,15 +962,15 @@ class Wastewater(BasicSubmission): outstr = outstr.replace("RSLWW", "RSL-WW") outstr = re.sub(r"WW(\d{4})", r"WW-\1", outstr, flags=re.IGNORECASE) outstr = re.sub(r"(\d{4})-(\d{2})-(\d{2})", r"\1\2\3", outstr) - logger.debug(f"Coming out of the preliminary parsing, the plate name is {outstr}") + # logger.debug(f"Coming out of the preliminary parsing, the plate name is {outstr}") try: plate_number = re.search(r"(?:(-|_)\d)(?!\d)", outstr).group().strip("_").strip("-") - logger.debug(f"Plate number is: {plate_number}") + # logger.debug(f"Plate number is: {plate_number}") except AttributeError as e: plate_number = "1" # self.parsed_name = re.sub(r"(\d{8})(-|_\d)?(R\d)?", fr"\1-{plate_number}\3", self.parsed_name) outstr = re.sub(r"(\d{8})(-|_)?\d?(R\d?)?", rf"\1-{plate_number}\3", outstr) - logger.debug(f"After addition of plate number the plate name is: {outstr}") + # logger.debug(f"After addition of plate number the plate name is: {outstr}") try: repeat = re.search(r"-\dR(?P\d)?", outstr).groupdict()['repeat'] if repeat == None: @@ -835,9 +980,13 @@ class Wastewater(BasicSubmission): return re.sub(r"(-\dR)\d?", rf"\1 {repeat}", outstr).replace(" ", "") @classmethod - def get_regex(cls): - # return "(?PRSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)\d?(\D|$)R?\d?)?)" - # return "(?PRSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)\d?([^_|\D]|$)R?\d?)?)" + def get_regex(cls) -> str: + """ + Retrieves string for regex construction + + Returns: + str: String for regex construction + """ return "(?PRSL(?:-|_)?WW(?:-|_)?20\d{2}-?\d{2}-?\d{2}(?:(_|-)?\d?([^_0123456789]|$)R?\d?)?)" class WastewaterArtic(BasicSubmission): @@ -854,7 +1003,7 @@ class WastewaterArtic(BasicSubmission): """ logger.debug(f"Hello from calculate base cost in WWArtic") try: - cols_count_96 = ceil(int(self.sample_count) / 8) + cols_count_96 = math.ceil(int(self.sample_count) / 8) except Exception as e: logger.error(f"Column count error: {e}") assoc = [item for item in self.extraction_kit.kit_submissiontype_associations if item.submission_type == self.submission_type][0] @@ -887,6 +1036,9 @@ class WastewaterArtic(BasicSubmission): @classmethod def enforce_name(cls, instr:str, data:dict|None=None) -> str: + """ + Extends parent + """ outstr = super().enforce_name(instr=instr, data=data) def construct(data:dict|None=None): today = datetime.now() @@ -902,17 +1054,36 @@ class WastewaterArtic(BasicSubmission): return re.sub(r"(_|-\d)?_ARTIC", f"-{plate_number}", outstr) @classmethod - def get_regex(cls): + def get_regex(cls) -> str: + """ + Retrieves string for regex construction + + Returns: + str: string for regex construction. 
+ """ return "(?P(\\d{4}-\\d{2}-\\d{2}(?:-|_)(?:\\d_)?artic)|(RSL(?:-|_)?AR(?:-|_)?20\\d{2}-?\\d{2}-?\\d{2}(?:(_|-)\\d?(\\D|$)R?\\d?)?))" @classmethod def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None, plate_map: dict | None = None) -> dict: + """ + Performs any final custom parsing of the excel file. Extends parent + + Args: + input_dict (dict): Parser product up to this point. + xl (pd.ExcelFile | None, optional): Excel submission form. Defaults to None. + info_map (dict | None, optional): Map of information locations from SubmissionType. Defaults to None. + plate_map (dict | None, optional): Constructed plate map of samples. Defaults to None. + + Returns: + dict: Updated parser product. + """ input_dict = super().finalize_parse(input_dict, xl, info_map, plate_map) - logger.debug(pformat(input_dict)) - logger.debug(pformat(info_map)) - logger.debug(pformat(plate_map)) + # logger.debug(pformat(input_dict)) + # logger.debug(pformat(info_map)) + # logger.debug(pformat(plate_map)) samples = [] for sample in input_dict['samples']: + logger.debug(f"Input sample: {pformat(sample.__dict__)}") if sample.submitter_id == "NTC1": samples.append(dict(sample=sample.submitter_id, destination_row=8, destination_column=2, source_row=0, source_column=0, plate_number='control', plate=None)) continue @@ -921,80 +1092,92 @@ class WastewaterArtic(BasicSubmission): continue destination_row = sample.row[0] destination_column = sample.column[0] - logger.debug(f"Looking up: {sample.submitter_id} friend.") + # logger.debug(f"Looking up: {sample.submitter_id} friend.") lookup_sample = BasicSample.query(submitter_id=sample.submitter_id) lookup_ssa = SubmissionSampleAssociation.query(sample=lookup_sample, exclude_submission_type=cls.__mapper_args__['polymorphic_identity'] , chronologic=True, reverse=True, limit=1) try: plate = lookup_ssa.submission.rsl_plate_num source_row = lookup_ssa.row source_column = lookup_ssa.column - except AttributeError: - # plate = "Error" - # source_row = 0 - # source_column = 0 - continue - samples.append(dict( + except AttributeError as e: + logger.error(f"Problem with lookup: {e}") + plate = "Error" + source_row = 0 + source_column = 0 + # continue + output_sample = dict( sample=sample.submitter_id, destination_column=destination_column, destination_row=destination_row, plate=plate, source_column=source_column, source_row = source_row - )) - plates = sorted(list(set([sample['plate'] for sample in samples if sample['plate'] != None]))) + ) + logger.debug(f"output sample: {pformat(output_sample)}") + samples.append(output_sample) + plates = sorted(list(set([sample['plate'] for sample in samples if sample['plate'] != None and sample['plate'] != "Error"]))) + logger.debug(f"Here's what I got for plates: {plates}") for iii, plate in enumerate(plates): for sample in samples: if sample['plate'] == plate: sample['plate_number'] = iii + 1 df = pd.DataFrame.from_records(samples).fillna(value="") - df.source_row = df.source_row.astype(int) - df.source_column = df.source_column.astype(int) - df.sort_values(by=['plate_number', 'source_column', 'source_row'], inplace=True) + try: + df.source_row = df.source_row.astype(int) + df.source_column = df.source_column.astype(int) + df.sort_values(by=['destination_column', 'destination_row'], inplace=True) + except AttributeError as e: + logger.error(f"Couldn't construct df due to {e}") input_dict['csv'] = df return input_dict # Sample Classes -class BasicSample(Base): +class BasicSample(BaseClass): """ 
Base of basic sample which polymorphs into BCSample and WWSample """ - # @declared_attr - # def __tablename__(cls): - # return cls.__name__.lower() __tablename__ = "_samples" - __table_args__ = {'extend_existing': True} id = Column(INTEGER, primary_key=True) #: primary key submitter_id = Column(String(64), nullable=False, unique=True) #: identification from submitter - sample_type = Column(String(32)) + sample_type = Column(String(32)) #: subtype of sample sample_submission_associations = relationship( "SubmissionSampleAssociation", back_populates="sample", cascade="all, delete-orphan", - ) + ) #: associated submissions __mapper_args__ = { "polymorphic_identity": "Basic Sample", # "polymorphic_on": sample_type, "polymorphic_on": case( - [ + (sample_type == "Wastewater Sample", "Wastewater Sample"), (sample_type == "Wastewater Artic Sample", "Wastewater Sample"), (sample_type == "Bacterial Culture Sample", "Bacterial Culture Sample"), - ], + else_="Basic Sample" ), "with_polymorphic": "*", } - submissions = association_proxy("sample_submission_associations", "submission") + submissions = association_proxy("sample_submission_associations", "submission") #: proxy of associated submissions @validates('submitter_id') - def create_id(self, key, value): - # logger.debug(f"validating sample_id of: {value}") + def create_id(self, key:str, value:str): + """ + Creates a random string as a submitter id. + + Args: + key (str): name of attribute + value (str): submitter id + + Returns: + str: new (or unchanged) submitter id + """ if value == None: return uuid.uuid4().hex.upper() else: @@ -1003,14 +1186,20 @@ class BasicSample(Base): def __repr__(self) -> str: return f"<{self.sample_type.replace('_', ' ').title().replace(' ', '')}({self.submitter_id})>" - def set_attribute(self, name, value): - # logger.debug(f"Setting {name} to {value}") + def set_attribute(self, name:str, value): + """ + Custom attribute setter + + Args: + name (str): name of attribute + value (_type_): value to be set to attribute + """ try: setattr(self, name, value) except AttributeError: logger.error(f"Attribute {name} not found") - def to_sub_dict(self, submission_rsl:str) -> dict: + def to_sub_dict(self, submission_rsl:str|BasicSubmission) -> dict: """ Returns a dictionary of locations. 
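The polymorphic_on rewrite above appears to track SQLAlchemy's newer case() calling convention, in which the whens are passed as positional tuples with an explicit else_ rather than wrapped in a list. A standalone sketch of that convention, using an illustrative unbound column rather than the mapped class:

    from sqlalchemy import Column, String, case

    sample_type = Column("sample_type", String(32))

    # Whens as positional tuples, plus an else_ fallback identity.
    discriminator = case(
        (sample_type == "Wastewater Sample", "Wastewater Sample"),
        (sample_type == "Wastewater Artic Sample", "Wastewater Sample"),
        (sample_type == "Bacterial Culture Sample", "Bacterial Culture Sample"),
        else_="Basic Sample",
    )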
@@ -1020,8 +1209,11 @@ class BasicSample(Base): Returns: dict: 'well' and sample submitter_id as 'name' """ - - assoc = [item for item in self.sample_submission_associations if item.submission.rsl_plate_num==submission_rsl][0] + match submission_rsl: + case BasicSubmission(): + assoc = [item for item in self.sample_submission_associations if item.submission==submission_rsl][0] + case str(): + assoc = [item for item in self.sample_submission_associations if item.submission.rsl_plate_num==submission_rsl][0] sample = {} try: sample['well'] = f"{row_map[assoc.row]}{assoc.column}" @@ -1029,6 +1221,16 @@ class BasicSample(Base): logger.error(f"Unable to find row {assoc.row} in row_map.") sample['well'] = None sample['name'] = self.submitter_id + sample['submitter_id'] = self.submitter_id + sample['sample_type'] = self.sample_type + if isinstance(assoc.row, list): + sample['row'] = assoc.row[0] + else: + sample['row'] = assoc.row + if isinstance(assoc.column, list): + sample['column'] = assoc.column[0] + else: + sample['column'] = assoc.column return sample def to_hitpick(self, submission_rsl:str|None=None) -> dict|None: @@ -1047,11 +1249,24 @@ class BasicSample(Base): return dict(name=self.submitter_id[:10], positive=False, tooltip=tooltip_text) @classmethod - def find_subclasses(cls, attrs:dict|None=None, sample_type:str|None=None): + def find_subclasses(cls, attrs:dict|None=None, sample_type:str|None=None) -> BasicSample: + """ + Retrieves subclass of BasicSample based on type or possessed attributes. + + Args: + attrs (dict | None, optional): attributes for query. Defaults to None. + sample_type (str | None, optional): sample type by name. Defaults to None. + + Raises: + AttributeError: Raised if class containing all given attributes cannot be found. + + Returns: + BasicSample: sample type object of interest + """ if sample_type != None: return cls.find_polymorphic_subclass(polymorphic_identity=sample_type) if len(attrs) == 0 or attrs == None: - logger.debug(f"No attr, returning {cls}") + logger.warning(f"No attr, returning {cls}") return cls if any([not hasattr(cls, attr) for attr in attrs]): logger.debug(f"{cls} is missing attrs. searching for better match.") @@ -1061,13 +1276,22 @@ class BasicSample(Base): except IndexError as e: raise AttributeError(f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs)}") else: - logger.debug(f"{cls} has all necessary attributes, returning") + # logger.debug(f"{cls} has all necessary attributes, returning") return cls - logger.debug(f"Using model: {model}") + # logger.debug(f"Using model: {model}") return model @classmethod - def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None): + def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None) -> BasicSample: + """ + Retrieves subclasses of BasicSample based on type name. + + Args: + polymorphic_identity (str | None, optional): Name of subclass fed to polymorphic identity. Defaults to None. + + Returns: + BasicSample: Subclass of interest. + """ if isinstance(polymorphic_identity, dict): polymorphic_identity = polymorphic_identity['value'] if polymorphic_identity == None: @@ -1081,7 +1305,15 @@ class BasicSample(Base): @classmethod def parse_sample(cls, input_dict:dict) -> dict: - # logger.debug(f"Called {cls.__name__} sample parser") + f""" + Custom sample parser for {cls.__name__} + + Args: + input_dict (dict): Basic parser results. + + Returns: + dict: Updated parser results. 
+ """ return input_dict @classmethod @@ -1096,7 +1328,6 @@ class BasicSample(Base): Lookup samples in the database by a number of parameters. Args: - ctx (Settings): Settings object passed down from gui submitter_id (str | None, optional): Name of the sample (limits results to 1). Defaults to None. sample_type (str | None, optional): Sample type. Defaults to None. limit (int, optional): Maximum number of results to return (0 = all). Defaults to 0. @@ -1111,30 +1342,43 @@ class BasicSample(Base): logger.debug(f"Length of kwargs: {len(kwargs)}") # model = models.BasicSample.find_subclasses(ctx=ctx, attrs=kwargs) # query: Query = setup_lookup(ctx=ctx, locals=locals()).query(model) - query: Query = cls.metadata.session.query(model) + query: Query = cls.__database_session__.query(model) match submitter_id: case str(): - logger.debug(f"Looking up {model} with submitter id: {submitter_id}") + # logger.debug(f"Looking up {model} with submitter id: {submitter_id}") query = query.filter(model.submitter_id==submitter_id) limit = 1 case _: pass - # match sample_type: - # case str(): - # logger.debug(f"Looking up {model} with sample type: {sample_type}") - # query = query.filter(models.BasicSample.sample_type==sample_type) - # case _: - # pass + match sample_type: + case str(): + logger.warning(f"Looking up samples with sample_type is disabled.") + # query = query.filter(models.BasicSample.sample_type==sample_type) + case _: + pass for k, v in kwargs.items(): attr = getattr(model, k) - logger.debug(f"Got attr: {attr}") + # logger.debug(f"Got attr: {attr}") query = query.filter(attr==v) if len(kwargs) > 0: limit = 1 return query_return(query=query, limit=limit) @classmethod - def query_or_create(cls, sample_type:str, **kwargs): + def query_or_create(cls, sample_type:str|None=None, **kwargs) -> BasicSample: + """ + Queries for a sample, if none found creates a new one. + + Args: + sample_type (str): sample subclass name + + Raises: + ValueError: Raised if no kwargs are passed to narrow down instances + ValueError: Raised if unallowed key is given. + + Returns: + _type_: _description_ + """ disallowed = ["id"] if kwargs == {}: raise ValueError("Need to narrow down query or the first available instance will be returned.") @@ -1149,6 +1393,12 @@ class BasicSample(Base): instance.sample_type = sample_type return instance + def save(self): + raise AttributeError(f"Save not implemented for {self.__class__}") + + def delete(self): + raise AttributeError(f"Delete not implemented for {self.__class__}") + #Below are the custom sample types class WastewaterSample(BasicSample): @@ -1157,11 +1407,11 @@ class WastewaterSample(BasicSample): """ # id = Column(INTEGER, ForeignKey('basicsample.id'), primary_key=True) ww_processing_num = Column(String(64)) #: wastewater processing number - ww_full_sample_id = Column(String(64)) + ww_full_sample_id = Column(String(64)) #: full id given by entrics rsl_number = Column(String(64)) #: rsl plate identification number collection_date = Column(TIMESTAMP) #: Date sample collected received_date = Column(TIMESTAMP) #: Date sample received - notes = Column(String(2000)) + notes = Column(String(2000)) #: notes from submission form sample_location = Column(String(8)) #: location on 24 well plate __mapper_args__ = {"polymorphic_identity": "Wastewater Sample", "polymorphic_load": "inline"} @@ -1169,9 +1419,12 @@ class WastewaterSample(BasicSample): """ Outputs a dictionary usable for html plate maps. Extends parent method. 
+ Args: + submission_rsl (str): rsl_plate_num of the submission + Returns: - dict: dictionary of sample id, row and column in elution plate - """ + dict|None: dict: dictionary of sample id, row and column in elution plate + """ sample = super().to_hitpick(submission_rsl=submission_rsl) assoc = [item for item in self.sample_submission_associations if item.submission.rsl_plate_num==submission_rsl][0] # if either n1 or n2 is positive, include this sample @@ -1185,7 +1438,13 @@ class WastewaterSample(BasicSample): logger.error(f"Couldn't set tooltip for {self.rsl_number}. Looks like there isn't PCR data.") return sample - def get_recent_ww_submission(self): + def get_recent_ww_submission(self) -> Wastewater: + """ + Gets most recent associated wastewater submission + + Returns: + Wastewater: Most recent wastewater submission + """ results = [sub for sub in self.submissions if isinstance(sub, Wastewater)] if len(results) > 1: results = results.sort(key=lambda sub: sub.submitted_date) @@ -1239,30 +1498,26 @@ class BacterialCultureSample(BasicSample): # Submission to Sample Associations -class SubmissionSampleAssociation(Base): +class SubmissionSampleAssociation(BaseClass): """ table containing submission/sample associations DOC: https://docs.sqlalchemy.org/en/14/orm/extensions/associationproxy.html """ - # @declared_attr - # def __tablename__(cls): - # return cls.__name__.lower() __tablename__ = "_submission_sample" - __table_args__ = {'extend_existing': True} - sample_id = Column(INTEGER, ForeignKey("_samples.id"), nullable=False) - submission_id = Column(INTEGER, ForeignKey("_submissions.id"), primary_key=True) + sample_id = Column(INTEGER, ForeignKey("_samples.id"), nullable=False) #: id of associated sample + submission_id = Column(INTEGER, ForeignKey("_submissions.id"), primary_key=True) #: id of associated submission row = Column(INTEGER, primary_key=True) #: row on the 96 well plate column = Column(INTEGER, primary_key=True) #: column on the 96 well plate # reference to the Submission object - submission = relationship(BasicSubmission, back_populates="submission_sample_associations") + submission = relationship(BasicSubmission, back_populates="submission_sample_associations") #: associated submission # reference to the Sample object - sample = relationship(BasicSample, back_populates="sample_submission_associations") + sample = relationship(BasicSample, back_populates="sample_submission_associations") #: associated sample - base_sub_type = Column(String) + base_sub_type = Column(String) #: string of subtype name # Refers to the type of parent. # Hooooooo boy, polymorphic association type, now we're getting into the weeds! @@ -1281,8 +1536,34 @@ class SubmissionSampleAssociation(Base): def __repr__(self) -> str: return f" dict: + """ + Returns a sample dictionary updated with instance information + + Returns: + dict: Updated dictionary with row, column and well updated + """ + sample = self.sample.to_sub_dict(submission_rsl=self.submission) + sample['row'] = self.row + sample['column'] = self.column + try: + sample['well'] = f"{row_map[self.row]}{self.column}" + except KeyError as e: + logger.error(f"Unable to find row {self.row} in row_map.") + sample['well'] = None + return sample + @classmethod - def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None): + def find_polymorphic_subclass(cls, polymorphic_identity:str|None=None) -> SubmissionSampleAssociation: + """ + Retrieves subclasses of SubmissionSampleAssociation based on type name. 
+ + Args: + polymorphic_identity (str | None, optional): Name of subclass fed to polymorphic identity. Defaults to None. + + Returns: + SubmissionSampleAssociation: Subclass of interest. + """ if isinstance(polymorphic_identity, dict): polymorphic_identity = polymorphic_identity['value'] if polymorphic_identity == None: @@ -1296,7 +1577,7 @@ class SubmissionSampleAssociation(Base): @classmethod @setup_lookup - def query(cls, + def query(cls, submission:BasicSubmission|str|None=None, exclude_submission_type:str|None=None, sample:BasicSample|str|None=None, @@ -1320,7 +1601,7 @@ class SubmissionSampleAssociation(Base): Returns: models.SubmissionSampleAssociation|List[models.SubmissionSampleAssociation]: Junction(s) of interest """ - query: Query = cls.metadata.session.query(cls) + query: Query = cls.__database_session__.query(cls) match submission: case BasicSubmission(): query = query.filter(cls.submission==submission) @@ -1347,18 +1628,11 @@ class SubmissionSampleAssociation(Base): # logger.debug(f"Query count: {query.count()}") if reverse and not chronologic: query = query.order_by(BasicSubmission.id.desc()) - # query = query.join(BasicSubmission).order_by(BasicSubmission.id.desc()) - # query.join(BasicSubmission).order_by(cls.submission.id.desc()) if chronologic: if reverse: query = query.order_by(BasicSubmission.submitted_date.desc()) - # query = query.join(BasicSubmission).order_by(BasicSubmission.submitted_date.desc()) - # query.join(BasicSubmission).order_by(cls.submission.submitted_date.desc()) else: query = query.order_by(BasicSubmission.submitted_date) - # query.join(BasicSubmission).order_by(cls.submission.submitted_date) - # if query.count() == 1: - # limit = 1 return query_return(query=query, limit=limit) @classmethod @@ -1366,7 +1640,18 @@ class SubmissionSampleAssociation(Base): association_type:str="Basic Association", submission:BasicSubmission|str|None=None, sample:BasicSample|str|None=None, - **kwargs): + **kwargs) -> SubmissionSampleAssociation: + """ + Queries for an association, if none exists creates a new one. + + Args: + association_type (str, optional): Subclass name. Defaults to "Basic Association". + submission (BasicSubmission | str | None, optional): associated submission. Defaults to None. + sample (BasicSample | str | None, optional): associated sample. Defaults to None. + + Returns: + SubmissionSampleAssociation: Queried or new association. + """ match submission: case BasicSubmission(): pass @@ -1399,17 +1684,20 @@ class SubmissionSampleAssociation(Base): return instance def save(self): - self.metadata.session.add(self) - self.metadata.session.commit() + """ + Adds this instance to the database and commits. + """ + self.__database_session__.add(self) + self.__database_session__.commit() return None + def delete(self): + raise AttributeError(f"Delete not implemented for {self.__class__}") + class WastewaterAssociation(SubmissionSampleAssociation): """ Derivative custom Wastewater/Submission Association... fancy. 
""" - # submission_id = Column(INTEGER, ForeignKey("submissionsampleassociation.submission_id"), primary_key=True) - # row = Column(INTEGER, ForeignKey("submissionsampleassociation.row"), nullable=False) - # column = Column(INTEGER, ForeignKey("submissionsampleassociation.column"), primary_key=True) ct_n1 = Column(FLOAT(2)) #: AKA ct for N1 ct_n2 = Column(FLOAT(2)) #: AKA ct for N2 n1_status = Column(String(32)) #: positive or negative for N1 diff --git a/src/submissions/backend/excel/parser.py b/src/submissions/backend/excel/parser.py index 43f99f3..35ca7e7 100644 --- a/src/submissions/backend/excel/parser.py +++ b/src/submissions/backend/excel/parser.py @@ -16,7 +16,6 @@ from datetime import date from dateutil.parser import parse, ParserError from tools import check_not_nan, convert_nans_to_nones, Settings - logger = logging.getLogger(f"submissions.{__name__}") row_keys = dict(A=1, B=2, C=3, D=4, E=5, F=6, G=7, H=8) @@ -28,7 +27,7 @@ class SheetParser(object): def __init__(self, ctx:Settings, filepath:Path|None = None): """ Args: - ctx (Settings): Settings object passed down from gui + ctx (Settings): Settings object passed down from gui. Necessary for Bacterial to get directory path. filepath (Path | None, optional): file path to excel sheet. Defaults to None. """ self.ctx = ctx @@ -56,6 +55,7 @@ class SheetParser(object): self.import_reagent_validation_check() self.parse_samples() self.finalize_parse() + logger.debug(f"Parser.sub after info scrape: {pformat(self.sub)}") def parse_info(self): """ @@ -70,15 +70,17 @@ class SheetParser(object): pass case _: self.sub[k] = v - logger.debug(f"Parser.sub after info scrape: {pformat(self.sub)}") - + def parse_reagents(self, extraction_kit:str|None=None): """ Pulls reagent info from the excel sheet + + Args: + extraction_kit (str | None, optional): Relevant extraction kit for reagent map. Defaults to None. """ if extraction_kit == None: extraction_kit = extraction_kit=self.sub['extraction_kit'] - logger.debug(f"Parsing reagents for {extraction_kit}") + # logger.debug(f"Parsing reagents for {extraction_kit}") self.sub['reagents'] = ReagentParser(xl=self.xl, submission_type=self.sub['submission_type'], extraction_kit=extraction_kit).parse_reagents() def parse_samples(self): @@ -92,13 +94,6 @@ class SheetParser(object): def import_kit_validation_check(self): """ Enforce that the parser has an extraction kit - - Args: - ctx (Settings): Settings obj passed down from gui - parser_sub (dict): The parser dictionary before going to pydantic - - Returns: - List[PydReagent]: List of reagents """ from frontend.widgets.pop_ups import KitSelector if not check_not_nan(self.sub['extraction_kit']['value']): @@ -115,18 +110,18 @@ class SheetParser(object): """ Enforce that only allowed reagents get into the Pydantic Model """ - # kit = lookup_kit_types(ctx=self.ctx, name=self.sub['extraction_kit']['value']) kit = KitType.query(name=self.sub['extraction_kit']['value']) allowed_reagents = [item.name for item in kit.get_reagents()] - logger.debug(f"List of reagents for comparison with allowed_reagents: {pformat(self.sub['reagents'])}") - # self.sub['reagents'] = [reagent for reagent in self.sub['reagents'] if reagent['value'].type in allowed_reagents] + # logger.debug(f"List of reagents for comparison with allowed_reagents: {pformat(self.sub['reagents'])}") self.sub['reagents'] = [reagent for reagent in self.sub['reagents'] if reagent.type in allowed_reagents] def finalize_parse(self): + """ + Run custom final validations of data for submission subclasses. 
+ """ finisher = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.sub['submission_type']).finalize_parse self.sub = finisher(input_dict=self.sub, xl=self.xl, info_map=self.info_map, plate_map=self.plate_map) - def to_pydantic(self) -> PydSubmission: """ Generates a pydantic model of scraped data for validation @@ -134,21 +129,19 @@ class SheetParser(object): Returns: PydSubmission: output pydantic model """ - logger.debug(f"Submission dictionary coming into 'to_pydantic':\n{pformat(self.sub)}") + # logger.debug(f"Submission dictionary coming into 'to_pydantic':\n{pformat(self.sub)}") psm = PydSubmission(filepath=self.filepath, **self.sub) - # delattr(psm, "filepath") return psm class InfoParser(object): def __init__(self, xl:pd.ExcelFile, submission_type:str): - logger.debug(f"\n\nHello from InfoParser!") + logger.info(f"\n\Hello from InfoParser!\n\n") # self.ctx = ctx self.map = self.fetch_submission_info_map(submission_type=submission_type) self.xl = xl logger.debug(f"Info map for InfoParser: {pformat(self.map)}") - def fetch_submission_info_map(self, submission_type:str|dict) -> dict: """ Gets location of basic info from the submission_type object in the database. @@ -192,6 +185,11 @@ class InfoParser(object): continue for item in relevant: value = df.iat[relevant[item]['row']-1, relevant[item]['column']-1] + match item: + case "submission_type": + value = value.title() + case _: + pass logger.debug(f"Setting {item} on {sheet} to {value}") if check_not_nan(value): if value != "None": @@ -206,10 +204,6 @@ class InfoParser(object): continue else: dicto[item] = dict(value=convert_nans_to_nones(value), missing=True) - try: - check = dicto['submission_category'] not in ["", None] - except KeyError: - check = False return self.custom_parser(input_dict=dicto, xl=self.xl) class ReagentParser(object): @@ -220,7 +214,17 @@ class ReagentParser(object): self.map = self.fetch_kit_info_map(extraction_kit=extraction_kit, submission_type=submission_type) self.xl = xl - def fetch_kit_info_map(self, extraction_kit:dict, submission_type:str): + def fetch_kit_info_map(self, extraction_kit:dict, submission_type:str) -> dict: + """ + Gets location of kit reagents from database + + Args: + extraction_kit (dict): Relevant kit information. + submission_type (str): Name of submission type. + + Returns: + dict: locations of reagent info for the kit. + """ if isinstance(extraction_kit, dict): extraction_kit = extraction_kit['value'] # kit = lookup_kit_types(ctx=self.ctx, name=extraction_kit) @@ -231,7 +235,13 @@ class ReagentParser(object): del reagent_map['info'] return reagent_map - def parse_reagents(self) -> list: + def parse_reagents(self) -> List[PydReagent]: + """ + Extracts reagent information from the excel form. + + Returns: + List[PydReagent]: List of parsed reagents. + """ listo = [] for sheet in self.xl.sheet_names: df = self.xl.parse(sheet, header=None, dtype=object) @@ -271,11 +281,10 @@ class SampleParser(object): convert sample sub-dataframe to dictionary of records Args: - ctx (Settings): settings object passed down from gui df (pd.DataFrame): input sample dataframe elution_map (pd.DataFrame | None, optional): optional map of elution plate. Defaults to None. 
""" - logger.debug("\n\nHello from SampleParser!") + logger.debug("\n\nHello from SampleParser!\n\n") self.samples = [] # self.ctx = ctx self.xl = xl @@ -454,40 +463,6 @@ class SampleParser(object): new_samples.append(PydSample(**translated_dict)) return result, new_samples - # def generate_sample_object(self, input_dict) -> BasicSample: - # """ - # Constructs sample object from dict. - # NOTE: Depreciated due to using Pydantic object up until db saving. - - # Args: - # input_dict (dict): sample information - - # Returns: - # models.BasicSample: Sample object - # """ - # database_obj = BasicSample.find_polymorphic_subclass(polymorphic_identity=input_dict['sample_type']) - # # query = input_dict['sample_type'].replace(" ", "") - # # try: - # # # database_obj = getattr(models, query) - - # # except AttributeError as e: - # # logger.error(f"Could not find the model {query}. Using generic.") - # # database_obj = models.BasicSample - # logger.debug(f"Searching database for {input_dict['submitter_id']}...") - # # instance = lookup_samples(ctx=self.ctx, submitter_id=str(input_dict['submitter_id'])) - # instance = BasicSample.query(submitter_id=str(input_dict['submitter_id'])) - # if instance == None: - # logger.debug(f"Couldn't find sample {input_dict['submitter_id']}. Creating new sample.") - # instance = database_obj() - # for k,v in input_dict.items(): - # try: - # instance.set_attribute(k, v) - # except Exception as e: - # logger.error(f"Failed to set {k} due to {type(e).__name__}: {e}") - # else: - # logger.debug(f"Sample {instance.submitter_id} already exists, will run update.") - # return dict(sample=instance, row=input_dict['row'], column=input_dict['column']) - def grab_plates(self) -> List[str]: """ Parse plate names from @@ -514,7 +489,6 @@ class PCRParser(object): Initializes object. Args: - ctx (dict): settings passed down from gui. filepath (Path | None, optional): file to parse. Defaults to None. 
""" # self.ctx = ctx diff --git a/src/submissions/backend/excel/reports.py b/src/submissions/backend/excel/reports.py index 5347a08..0cdaa1d 100644 --- a/src/submissions/backend/excel/reports.py +++ b/src/submissions/backend/excel/reports.py @@ -5,7 +5,7 @@ from pandas import DataFrame import logging from datetime import date, timedelta import re -from typing import Tuple +from typing import List, Tuple from tools import jinja_template_loading, Settings logger = logging.getLogger(f"submissions.{__name__}") @@ -27,7 +27,7 @@ def make_report_xlsx(records:list[dict]) -> Tuple[DataFrame, DataFrame]: df = df.sort_values("Submitting Lab") # aggregate cost and sample count columns df2 = df.groupby(["Submitting Lab", "Extraction Kit"]).agg({'Extraction Kit':'count', 'Cost': 'sum', 'Sample Count':'sum'}) - df2 = df2.rename(columns={"Extraction Kit": 'Plate Count'}) + df2 = df2.rename(columns={"Extraction Kit": 'Run Count'}) logger.debug(f"Output daftaframe for xlsx: {df2.columns}") df = df.drop('id', axis=1) df = df.sort_values(['Submitting Lab', "Submitted Date"]) @@ -57,16 +57,16 @@ def make_report_html(df:DataFrame, start_date:date, end_date:date) -> str: logger.debug(f"Old lab: {old_lab}, Current lab: {lab}") logger.debug(f"Name: {row[0][1]}") data = [item for item in row[1]] - kit = dict(name=row[0][1], cost=data[1], plate_count=int(data[0]), sample_count=int(data[2])) + kit = dict(name=row[0][1], cost=data[1], run_count=int(data[0]), sample_count=int(data[2])) # if this is the same lab as before add together if lab == old_lab: output[-1]['kits'].append(kit) output[-1]['total_cost'] += kit['cost'] output[-1]['total_samples'] += kit['sample_count'] - output[-1]['total_plates'] += kit['plate_count'] + output[-1]['total_runs'] += kit['run_count'] # if not the same lab, make a new one else: - adder = dict(lab=lab, kits=[kit], total_cost=kit['cost'], total_samples=kit['sample_count'], total_plates=kit['plate_count']) + adder = dict(lab=lab, kits=[kit], total_cost=kit['cost'], total_samples=kit['sample_count'], total_runs=kit['run_count']) output.append(adder) old_lab = lab logger.debug(output) @@ -83,10 +83,10 @@ def convert_data_list_to_df(input:list[dict], subtype:str|None=None) -> DataFram Args: ctx (dict): settings passed from gui input (list[dict]): list of dictionaries containing records - subtype (str | None, optional): _description_. Defaults to None. + subtype (str | None, optional): name of submission type. Defaults to None. Returns: - DataFrame: _description_ + DataFrame: dataframe of controls """ df = DataFrame.from_records(input) @@ -218,5 +218,14 @@ def drop_reruns_from_df(ctx:Settings, df: DataFrame) -> DataFrame: df = df.drop(df[df.name == first_run].index) return df -def make_hitpicks(input:list) -> DataFrame: +def make_hitpicks(input:List[dict]) -> DataFrame: + """ + Converts lsit of dictionaries constructed by hitpicking to dataframe + + Args: + input (List[dict]): list of hitpicked dictionaries + + Returns: + DataFrame: constructed dataframe. 
+ """ return DataFrame.from_records(input) \ No newline at end of file diff --git a/src/submissions/backend/validators/__init__.py b/src/submissions/backend/validators/__init__.py index 20421eb..1410149 100644 --- a/src/submissions/backend/validators/__init__.py +++ b/src/submissions/backend/validators/__init__.py @@ -12,7 +12,6 @@ class RSLNamer(object): """ def __init__(self, instr:str, sub_type:str|None=None, data:dict|None=None): self.submission_type = sub_type - if self.submission_type == None: self.submission_type = self.retrieve_submission_type(instr=instr) logger.debug(f"got submission type: {self.submission_type}") @@ -23,6 +22,15 @@ class RSLNamer(object): @classmethod def retrieve_submission_type(cls, instr:str|Path) -> str: + """ + Gets submission type from excel file properties or sheet names or regex pattern match or user input + + Args: + instr (str | Path): filename + + Returns: + str: parsed submission type + """ match instr: case Path(): logger.debug(f"Using path method for {instr}.") @@ -32,7 +40,8 @@ class RSLNamer(object): submission_type = [item.strip().title() for item in wb.properties.category.split(";")][0] except AttributeError: try: - sts = {item.name:item.info_map['all_sheets'] for item in SubmissionType.query(key="all_sheets")} + # sts = {item.name:item.info_map['all_sheets'] for item in SubmissionType.query(key="all_sheets")} + sts = {item.name:item.get_template_file_sheets() for item in SubmissionType.query()} for k,v in sts.items(): # This gets the *first* submission type that matches the sheet names in the workbook if wb.sheetnames == v: diff --git a/src/submissions/backend/validators/pydant.py b/src/submissions/backend/validators/pydant.py index 61f5b8b..7d41fe5 100644 --- a/src/submissions/backend/validators/pydant.py +++ b/src/submissions/backend/validators/pydant.py @@ -1,22 +1,24 @@ ''' Contains pydantic models and accompanying validators ''' +from operator import attrgetter import uuid from pydantic import BaseModel, field_validator, Field from datetime import date, datetime, timedelta from dateutil.parser import parse from dateutil.parser._parser import ParserError -from typing import List, Any, Tuple, Literal +from typing import List, Any, Tuple from . 
import RSLNamer from pathlib import Path import re import logging -from tools import check_not_nan, convert_nans_to_nones, jinja_template_loading, Report, Result +from tools import check_not_nan, convert_nans_to_nones, jinja_template_loading, Report, Result, row_map from backend.db.models import * from sqlalchemy.exc import StatementError, IntegrityError from PyQt6.QtWidgets import QComboBox, QWidget -from pprint import pformat -from openpyxl import load_workbook +# from pprint import pformat +from openpyxl import load_workbook, Workbook +from io import BytesIO logger = logging.getLogger(f"submissions.{__name__}") @@ -87,9 +89,14 @@ class PydReagent(BaseModel): return values.data['type'] def toSQL(self) -> Tuple[Reagent, Report]: + """ + Converts this instance into a backend.db.models.kit.Reagent instance + + Returns: + Tuple[Reagent, Report]: Reagent instance and result of function + """ report = Report() logger.debug(f"Reagent SQL constructor is looking up type: {self.type}, lot: {self.lot}") - # reagent = lookup_reagents(ctx=self.ctx, lot_number=self.lot) reagent = Reagent.query(lot_number=self.lot) logger.debug(f"Result: {reagent}") if reagent == None: @@ -105,7 +112,6 @@ class PydReagent(BaseModel): case "expiry": reagent.expiry = value case "type": - # reagent_type = lookup_reagent_types(ctx=self.ctx, name=value) reagent_type = ReagentType.query(name=value) if reagent_type != None: reagent.type.append(reagent_type) @@ -116,6 +122,16 @@ class PydReagent(BaseModel): return reagent, report def toForm(self, parent:QWidget, extraction_kit:str) -> QComboBox: + """ + Converts this instance into a form widget + + Args: + parent (QWidget): Parent widget of the constructed object + extraction_kit (str): Name of extraction kit used + + Returns: + QComboBox: Form object. + """ from frontend.widgets.submission_widget import ReagentFormWidget return ReagentFormWidget(parent=parent, reagent=self, extraction_kit=extraction_kit) @@ -138,16 +154,19 @@ class PydSample(BaseModel, extra='allow'): def int_to_str(cls, value): return str(value) - def toSQL(self, submission=None): - result = None + def toSQL(self, submission:BasicSubmission|str=None) -> Tuple[BasicSample, Result]: + """ + Converts this instance into a backend.db.models.submissions.Sample object + + Args: + submission (BasicSubmission | str, optional): Submission joined to this sample. Defaults to None. + + Returns: + Tuple[BasicSample, Result]: Sample object and result object. + """ + report = None self.__dict__.update(self.model_extra) logger.debug(f"Here is the incoming sample dict: \n{self.__dict__}") - # instance = lookup_samples(ctx=ctx, submitter_id=self.submitter_id) - # instance = BasicSample.query(submitter_id=self.submitter_id) - # if instance == None: - # logger.debug(f"Sample {self.submitter_id} doesn't exist yet. 
Looking up sample object with polymorphic identity: {self.sample_type}") - # instance = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)() - # instance = BasicSample.query_or_create(**{k:v for k,v in self.__dict__.items() if k not in ['row', 'column']}) instance = BasicSample.query_or_create(sample_type=self.sample_type, submitter_id=self.submitter_id) for key, value in self.__dict__.items(): # logger.debug(f"Setting sample field {key} to {value}") @@ -161,13 +180,6 @@ class PydSample(BaseModel, extra='allow'): for row, column in zip(self.row, self.column): # logger.debug(f"Looking up association with identity: ({submission.submission_type_name} Association)") logger.debug(f"Looking up association with identity: ({assoc_type} Association)") - # association = lookup_submission_sample_association(ctx=ctx, submission=submission, row=row, column=column) - # association = SubmissionSampleAssociation.query(submission=submission, row=row, column=column) - # logger.debug(f"Returned association: {association}") - # if association == None or association == []: - # logger.debug(f"Looked up association at row {row}, column {column} didn't exist, creating new association.") - # association = SubmissionSampleAssociation.find_polymorphic_subclass(polymorphic_identity=f"{submission.submission_type_name} Association") - # association = association(submission=submission, sample=instance, row=row, column=column) association = SubmissionSampleAssociation.query_or_create(association_type=f"{assoc_type} Association", submission=submission, sample=instance, @@ -176,7 +188,7 @@ class PydSample(BaseModel, extra='allow'): instance.sample_submission_associations.append(association) except IntegrityError: instance.metadata.session.rollback() - return instance, result + return instance, report class PydSubmission(BaseModel, extra='allow'): filepath: Path @@ -185,7 +197,7 @@ class PydSubmission(BaseModel, extra='allow'): submitter_plate_num: dict|None = Field(default=dict(value=None, missing=True), validate_default=True) submitted_date: dict|None rsl_plate_num: dict|None = Field(default=dict(value=None, missing=True), validate_default=True) - # submitted_date: dict|None + submitted_date: dict|None submitting_lab: dict|None sample_count: dict|None extraction_kit: dict|None @@ -197,7 +209,7 @@ class PydSubmission(BaseModel, extra='allow'): @field_validator("submitter_plate_num") @classmethod def enforce_with_uuid(cls, value): - logger.debug(f"submitter plate id: {value}") + # logger.debug(f"submitter_plate_num coming into pydantic: {value}") if value['value'] == None or value['value'] == "None": return dict(value=uuid.uuid4().hex.upper(), missing=True) else: @@ -250,14 +262,6 @@ class PydSubmission(BaseModel, extra='allow'): logger.debug(f"RSL-plate initial value: {value['value']} and other values: {values.data}") sub_type = values.data['submission_type']['value'] if check_not_nan(value['value']): - # if lookup_submissions(ctx=values.data['ctx'], rsl_number=value['value']) == None: - # if BasicSubmission.query(rsl_number=value['value']) == None: - # return dict(value=value['value'], missing=False) - # else: - # logger.warning(f"Submission number {value} already exists in DB, attempting salvage with filepath") - # # output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name - # output = RSLNamer(instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name - # return dict(value=output, missing=True) return value else: output = 
RSLNamer(instr=values.data['filepath'].__str__(), sub_type=sub_type, data=values.data).parsed_name @@ -278,7 +282,6 @@ class PydSubmission(BaseModel, extra='allow'): return value else: return dict(value=convert_nans_to_nones(value['value']), missing=True) - return value @field_validator("sample_count", mode='before') @classmethod @@ -290,7 +293,6 @@ class PydSubmission(BaseModel, extra='allow'): @field_validator("extraction_kit", mode='before') @classmethod def rescue_kit(cls, value): - if check_not_nan(value): if isinstance(value, str): return dict(value=value, missing=False) @@ -305,6 +307,7 @@ class PydSubmission(BaseModel, extra='allow'): @field_validator("submission_type", mode='before') @classmethod def make_submission_type(cls, value, values): + logger.debug(f"Submission type coming into pydantic: {value}") if not isinstance(value, dict): value = {"value": value} if check_not_nan(value['value']): @@ -313,6 +316,12 @@ class PydSubmission(BaseModel, extra='allow'): else: return dict(value=RSLNamer(instr=values.data['filepath'].__str__()).submission_type.title(), missing=True) + @field_validator("submission_category", mode="before") + def create_category(cls, value): + if not isinstance(value, dict): + return dict(value=value, missing=True) + return value + @field_validator("submission_category") @classmethod def rescue_category(cls, value, values): @@ -321,6 +330,10 @@ class PydSubmission(BaseModel, extra='allow'): return value def handle_duplicate_samples(self): + """ + Collapses multiple samples with same submitter id into one with lists for rows, columns + TODO: Find out if this is really necessary + """ submitter_ids = list(set([sample.submitter_id for sample in self.samples])) output = [] for id in submitter_ids: @@ -336,7 +349,16 @@ class PydSubmission(BaseModel, extra='allow'): output.append(dummy) self.samples = output - def improved_dict(self, dictionaries:bool=True): + def improved_dict(self, dictionaries:bool=True) -> dict: + """ + Adds model_extra to fields. + + Args: + dictionaries (bool, optional): Are dictionaries expected as input? i.e. Should key['value'] be retrieved. Defaults to True. + + Returns: + dict: This instance as a dictionary + """ fields = list(self.model_fields.keys()) + list(self.model_extra.keys()) if dictionaries: output = {k:getattr(self, k) for k in fields} @@ -344,14 +366,25 @@ class PydSubmission(BaseModel, extra='allow'): output = {k:(getattr(self, k) if not isinstance(getattr(self, k), dict) else getattr(self, k)['value']) for k in fields} return output - def find_missing(self): + def find_missing(self) -> Tuple[dict, dict]: + """ + Retrieves info and reagents marked as missing. + + Returns: + Tuple[dict, dict]: Dict for missing info, dict for missing reagents. 
+ """ info = {k:v for k,v in self.improved_dict().items() if isinstance(v, dict)} missing_info = {k:v for k,v in info.items() if v['missing']} missing_reagents = [reagent for reagent in self.reagents if reagent.missing] return missing_info, missing_reagents def toSQL(self) -> Tuple[BasicSubmission, Result]: - + """ + Converts this instance into a backend.db.models.submissions.BasicSubmission instance + + Returns: + Tuple[BasicSubmission, Result]: BasicSubmission instance, result object + """ self.__dict__.update(self.model_extra) instance, code, msg = BasicSubmission.query_or_create(submission_type=self.submission_type['value'], rsl_plate_num=self.rsl_plate_num['value']) result = Result(msg=msg, code=code) @@ -395,10 +428,42 @@ class PydSubmission(BaseModel, extra='allow'): return instance, result def toForm(self, parent:QWidget): + """ + Converts this instance into a frontend.widgets.submission_widget.SubmissionFormWidget + + Args: + parent (QWidget): parent widget of the constructed object + + Returns: + SubmissionFormWidget: Submission form widget + """ from frontend.widgets.submission_widget import SubmissionFormWidget return SubmissionFormWidget(parent=parent, **self.improved_dict()) - def autofill_excel(self, missing_only:bool=True): + def autofill_excel(self, missing_only:bool=True, backup:bool=False) -> Workbook: + """ + Fills in relevant information/reagent cells in an excel workbook. + + Args: + missing_only (bool, optional): Only fill missing items or all. Defaults to True. + backup (bool, optional): Do a full backup of the submission (adds samples). Defaults to False. + + Returns: + Workbook: Filled in workbook + """ + # open a new workbook using openpyxl + if self.filepath.stem.startswith("tmp"): + template = SubmissionType.query(name=self.submission_type['value']).template_file + workbook = load_workbook(BytesIO(template)) + missing_only = False + else: + try: + workbook = load_workbook(self.filepath) + except Exception as e: + logger.error(f"Couldn't open workbook due to {e}") + template = SubmissionType.query(name=self.submission_type).template_file + workbook = load_workbook(BytesIO(template)) + missing_only = False if missing_only: info, reagents = self.find_missing() else: @@ -442,8 +507,6 @@ class PydSubmission(BaseModel, extra='allow'): logger.error(f"Unable to fill in {k}, not found in relevant info.") logger.debug(f"New reagents: {new_reagents}") logger.debug(f"New info: {new_info}") - # open a new workbook using openpyxl - workbook = load_workbook(self.filepath) # get list of sheet names sheets = workbook.sheetnames # logger.debug(workbook.sheetnames) @@ -468,12 +531,48 @@ class PydSubmission(BaseModel, extra='allow'): logger.debug(f"Attempting: {item['type']} in row {item['location']['row']}, column {item['location']['column']}") worksheet.cell(row=item['location']['row'], column=item['location']['column'], value=item['value']) # Hacky way to pop in 'signed by' - # custom_parser = get_polymorphic_subclass(BasicSubmission, info['submission_type']) custom_parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type['value']) - workbook = custom_parser.custom_autofill(workbook) + workbook = custom_parser.custom_autofill(workbook, info=self.improved_dict(), backup=backup) + return workbook + + def autofill_samples(self, workbook:Workbook) -> Workbook: + """ + Fill in sample rows on the excel sheet + + Args: + workbook (Workbook): Input excel workbook + + Returns: + Workbook: Updated excel workbook + """ + sample_info = 
SubmissionType.query(name=self.submission_type['value']).info_map['samples'] + worksheet = workbook[sample_info["lookup_table"]['sheet']] + samples = sorted(self.samples, key=attrgetter('column', 'row')) + logger.debug(f"Samples: {samples}") + # Fail safe against multiple instances of the same sample + for iii, sample in enumerate(samples, start=1): + row = sample_info['lookup_table']['start_row'] + iii + fields = [field for field in list(sample.model_fields.keys()) + list(sample.model_extra.keys()) if field in sample_info['sample_columns'].keys()] + for field in fields: + column = sample_info['sample_columns'][field] + value = getattr(sample, field) + match value: + case list(): + value = value[0] + case _: + value = value + if field == "row": + value = row_map[value] + worksheet.cell(row=row, column=column, value=value) return workbook - def construct_filename(self): + def construct_filename(self) -> str: + """ + Creates filename for this instance + + Returns: + str: Output filename + """ env = jinja_template_loading() template = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type).filename_template() logger.debug(f"Using template string: {template}") @@ -483,12 +582,19 @@ class PydSubmission(BaseModel, extra='allow'): return render class PydContact(BaseModel): + name: str phone: str|None email: str|None - def toSQL(self): + def toSQL(self) -> Contact: + """ + Converts this instance into a backend.db.models.organization.Contact instance + + Returns: + Contact: Contact instance + """ return Contact(name=self.name, phone=self.phone, email=self.email) class PydOrganization(BaseModel): @@ -497,7 +603,13 @@ class PydOrganization(BaseModel): cost_centre: str contacts: List[PydContact]|None - def toSQL(self): + def toSQL(self) -> Organization: + """ + Converts this instance into a backend.db.models.organization.Organization instance. + + Returns: + Organization: Organization instance + """ instance = Organization() for field in self.model_fields: match field: @@ -522,7 +634,16 @@ class PydReagentType(BaseModel): return timedelta(days=value) return value - def toSQL(self, kit:KitType): + def toSQL(self, kit:KitType) -> ReagentType: + """ + Converts this instance into a backend.db.models.ReagentType instance + + Args: + kit (KitType): KitType joined to the reagenttype + + Returns: + ReagentType: ReagentType instance + """ # instance: ReagentType = lookup_reagent_types(ctx=ctx, name=self.name) instance: ReagentType = ReagentType.query(name=self.name) if instance == None: @@ -543,14 +664,21 @@ class PydKit(BaseModel): name: str reagent_types: List[PydReagentType] = [] - def toSQL(self): - result = dict(message=None, status='Information') + def toSQL(self) -> Tuple[KitType, Report]: + """ + Converts this instance into a backend.db.models.kits.KitType instance + + Returns: + Tuple[KitType, Report]: KitType instance and report of results. 
+ """ + # result = dict(message=None, status='Information') + report = Report() # instance = lookup_kit_types(ctx=ctx, name=self.name) instance = KitType.query(name=self.name) if instance == None: instance = KitType(name=self.name) # instance.reagent_types = [item.toSQL(ctx, instance) for item in self.reagent_types] [item.toSQL(instance) for item in self.reagent_types] - return instance, result + return instance, report diff --git a/src/submissions/frontend/visualizations/barcode.py b/src/submissions/frontend/visualizations/barcode.py index 5f1b1e1..83470eb 100644 --- a/src/submissions/frontend/visualizations/barcode.py +++ b/src/submissions/frontend/visualizations/barcode.py @@ -4,5 +4,16 @@ from reportlab.lib.units import mm def make_plate_barcode(text:str, width:int=100, height:int=25) -> Drawing: + """ + Creates a barcode image for a given str. + + Args: + text (str): Input string + width (int, optional): Width (pixels) of image. Defaults to 100. + height (int, optional): Height (pixels) of image. Defaults to 25. + + Returns: + Drawing: image object + """ # return createBarcodeDrawing('Code128', value=text, width=200, height=50, humanReadable=True) return createBarcodeImageInMemory('Code128', value=text, width=width*mm, height=height*mm, humanReadable=True, format="png") \ No newline at end of file diff --git a/src/submissions/frontend/visualizations/control_charts.py b/src/submissions/frontend/visualizations/control_charts.py index 7c5a8be..cb9c1b9 100644 --- a/src/submissions/frontend/visualizations/control_charts.py +++ b/src/submissions/frontend/visualizations/control_charts.py @@ -4,25 +4,26 @@ Functions for constructing controls graphs using plotly. import plotly import plotly.express as px import pandas as pd -from pathlib import Path from plotly.graph_objects import Figure import logging from backend.excel import get_unique_values_in_df_column +from tools import Settings +from frontend.widgets.functions import select_save_file logger = logging.getLogger(f"submissions.{__name__}") -def create_charts(ctx:dict, df:pd.DataFrame, ytitle:str|None=None) -> Figure: +def create_charts(ctx:Settings, df:pd.DataFrame, ytitle:str|None=None) -> Figure: """ Constructs figures based on parsed pandas dataframe. Args: - settings (dict): settings passed down from gui + ctx (Settings): settings passed down from gui df (pd.DataFrame): input dataframe - group_name (str): controltype + ytitle (str | None, optional): title for the y-axis. Defaults to None. Returns: - Figure: plotly figure + Figure: Plotly figure """ from backend.excel import drop_reruns_from_df # converts starred genera to normal and splits off list of starred @@ -54,8 +55,6 @@ def create_charts(ctx:dict, df:pd.DataFrame, ytitle:str|None=None) -> Figure: fig = construct_chart(df=df, modes=modes, ytitle=ytitle) return fig - - def generic_figure_markers(fig:Figure, modes:list=[], ytitle:str|None=None) -> Figure: """ Adds standard layout to figure. @@ -63,6 +62,7 @@ def generic_figure_markers(fig:Figure, modes:list=[], ytitle:str|None=None) -> F Args: fig (Figure): Input figure. modes (list, optional): List of modes included in figure. Defaults to []. + ytitle (str, optional): Title for the y-axis. Defaults to None. Returns: Figure: Output figure with updated titles, rangeslider, buttons. 
@@ -102,7 +102,6 @@ def generic_figure_markers(fig:Figure, modes:list=[], ytitle:str|None=None) -> F assert type(fig) == Figure return fig - def make_buttons(modes:list, fig_len:int) -> list: """ Creates list of buttons with one for each mode to be used in showing/hiding mode traces. @@ -135,7 +134,7 @@ def make_buttons(modes:list, fig_len:int) -> list: )) return buttons -def output_figures(settings:dict, figs:list, group_name:str): +def output_figures(figs:list, group_name:str): """ Writes plotly figure to html file. @@ -144,21 +143,19 @@ def output_figures(settings:dict, figs:list, group_name:str): fig (Figure): input figure object group_name (str): controltype """ - with open(Path(settings['folder']['output']).joinpath(f'{group_name}.html'), "w") as f: + output = select_save_file(None, default_name=group_name, extension="html") + with open(output, "w") as f: for fig in figs: try: f.write(fig.to_html(full_html=False, include_plotlyjs='cdn')) except AttributeError: logger.error(f"The following figure was a string: {fig}") - - def construct_chart(df:pd.DataFrame, modes:list, ytitle:str|None=None) -> Figure: """ Creates a plotly chart for controls from a pandas dataframe Args: - ctx (dict): settings passed down from gui df (pd.DataFrame): input dataframe of controls modes (list): analysis modes to construct charts for ytitle (str | None, optional): title on the y-axis. Defaults to None. @@ -200,72 +197,69 @@ def construct_chart(df:pd.DataFrame, modes:list, ytitle:str|None=None) -> Figure # Below are the individual construction functions. They must be named "construct_{mode}_chart" and # take only json_in and mode to hook into the main processor. -def construct_refseq_chart(settings:dict, df:pd.DataFrame, group_name:str, mode:str) -> Figure: - """ - Constructs intial refseq chart for both contains and matches (depreciated). +# def construct_refseq_chart(df:pd.DataFrame, group_name:str, mode:str) -> Figure: +# """ +# Constructs intial refseq chart for both contains and matches (depreciated). - Args: - settings (dict): settings passed down from gui. - df (pd.DataFrame): dataframe containing all sample data for the group. - group_name (str): name of the group being processed. - mode (str): contains or matches, overwritten by hardcoding, so don't think about it too hard. +# Args: +# df (pd.DataFrame): dataframe containing all sample data for the group. +# group_name (str): name of the group being processed. +# mode (str): contains or matches, overwritten by hardcoding, so don't think about it too hard. - Returns: - Figure: initial figure with contains and matches traces. - """ - # This overwrites the mode from the signature, might get confusing. - fig = Figure() - modes = ['contains', 'matches'] - for ii, mode in enumerate(modes): - bar = px.bar(df, x="submitted_date", - y=f"{mode}_ratio", - color="target", - title=f"{group_name}_{mode}", - barmode='stack', - hover_data=["genus", "name", f"{mode}_hashes"], - text="genera" - ) - bar.update_traces(visible = ii == 0) - # Plotly express returns a full figure, so we have to use the data from that figure only. - fig.add_traces(bar.data) - # sys.exit(f"number of traces={len(fig.data)}") - return generic_figure_markers(fig=fig, modes=modes) +# Returns: +# Figure: initial figure with contains and matches traces. +# """ +# # This overwrites the mode from the signature, might get confusing. 
+# fig = Figure() +# modes = ['contains', 'matches'] +# for ii, mode in enumerate(modes): +# bar = px.bar(df, x="submitted_date", +# y=f"{mode}_ratio", +# color="target", +# title=f"{group_name}_{mode}", +# barmode='stack', +# hover_data=["genus", "name", f"{mode}_hashes"], +# text="genera" +# ) +# bar.update_traces(visible = ii == 0) +# # Plotly express returns a full figure, so we have to use the data from that figure only. +# fig.add_traces(bar.data) +# # sys.exit(f"number of traces={len(fig.data)}") +# return generic_figure_markers(fig=fig, modes=modes) +# def construct_kraken_chart(settings:dict, df:pd.DataFrame, group_name:str, mode:str) -> Figure: +# """ +# Constructs intial refseq chart for each mode in the kraken config settings. (depreciated) -def construct_kraken_chart(settings:dict, df:pd.DataFrame, group_name:str, mode:str) -> Figure: - """ - Constructs intial refseq chart for each mode in the kraken config settings. (depreciated) +# Args: +# settings (dict): settings passed down from click. +# df (pd.DataFrame): dataframe containing all sample data for the group. +# group_name (str): name of the group being processed. +# mode (str): kraken modes retrieved from config file by setup. - Args: - settings (dict): settings passed down from click. - df (pd.DataFrame): dataframe containing all sample data for the group. - group_name (str): name of the group being processed. - mode (str): kraken modes retrieved from config file by setup. - - Returns: - Figure: initial figure with traces for modes - """ - df[f'{mode}_count'] = pd.to_numeric(df[f'{mode}_count'],errors='coerce') - df = df.groupby('submitted_date')[f'{mode}_count'].nlargest(2) +# Returns: +# Figure: initial figure with traces for modes +# """ +# df[f'{mode}_count'] = pd.to_numeric(df[f'{mode}_count'],errors='coerce') +# df = df.groupby('submitted_date')[f'{mode}_count'].nlargest(2) - # The actual percentage from kraken was off due to exclusion of NaN, recalculating. - df[f'{mode}_percent'] = 100 * df[f'{mode}_count'] / df.groupby('submitted_date')[f'{mode}_count'].transform('sum') - modes = settings['modes'][mode] - # This overwrites the mode from the signature, might get confusing. - fig = Figure() - for ii, entry in enumerate(modes): - bar = px.bar(df, x="submitted_date", - y=entry, - color="genus", - title=f"{group_name}_{entry}", - barmode="stack", - hover_data=["genus", "name", "target"], - text="genera", - ) - bar.update_traces(visible = ii == 0) - fig.add_traces(bar.data) - return generic_figure_markers(fig=fig, modes=modes) - +# # The actual percentage from kraken was off due to exclusion of NaN, recalculating. +# df[f'{mode}_percent'] = 100 * df[f'{mode}_count'] / df.groupby('submitted_date')[f'{mode}_count'].transform('sum') +# modes = settings['modes'][mode] +# # This overwrites the mode from the signature, might get confusing. 
+# fig = Figure() +# for ii, entry in enumerate(modes): +# bar = px.bar(df, x="submitted_date", +# y=entry, +# color="genus", +# title=f"{group_name}_{entry}", +# barmode="stack", +# hover_data=["genus", "name", "target"], +# text="genera", +# ) +# bar.update_traces(visible = ii == 0) +# fig.add_traces(bar.data) +# return generic_figure_markers(fig=fig, modes=modes) def divide_chunks(input_list:list, chunk_count:int): """ @@ -281,7 +275,6 @@ def divide_chunks(input_list:list, chunk_count:int): k, m = divmod(len(input_list), chunk_count) return (input_list[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(chunk_count)) - def construct_html(figure:Figure) -> str: """ Creates final html code from plotly diff --git a/src/submissions/frontend/visualizations/plate_map.py b/src/submissions/frontend/visualizations/plate_map.py index 0a67e27..dec580f 100644 --- a/src/submissions/frontend/visualizations/plate_map.py +++ b/src/submissions/frontend/visualizations/plate_map.py @@ -84,14 +84,17 @@ def make_plate_map(sample_list:list) -> Image: return new_img def make_plate_map_html(sample_list:list, plate_rows:int=8, plate_columns=12) -> str: - try: - plate_num = sample_list[0]['plate_name'] - except IndexError as e: - logger.error(f"Couldn't get a plate number. Will not make plate.") - return None - except TypeError as e: - logger.error(f"No samples for this plate. Nothing to do.") - return None + """ + Constructs an html based plate map. + + Args: + sample_list (list): List of submission samples + plate_rows (int, optional): Number of rows in the plate. Defaults to 8. + plate_columns (int, optional): Number of columns in the plate. Defaults to 12. + + Returns: + str: html output string. + """ for sample in sample_list: if sample['positive']: sample['background_color'] = "#f10f07" @@ -108,4 +111,5 @@ def make_plate_map_html(sample_list:list, plate_rows:int=8, plate_columns=12) -> env = jinja_template_loading() template = env.get_template("plate_map.html") html = template.render(samples=output_samples, PLATE_ROWS=plate_rows, PLATE_COLUMNS=plate_columns) - return html \ No newline at end of file + return html + diff --git a/src/submissions/frontend/widgets/app.py b/src/submissions/frontend/widgets/app.py index 03495ec..cbcc2db 100644 --- a/src/submissions/frontend/widgets/app.py +++ b/src/submissions/frontend/widgets/app.py @@ -11,9 +11,6 @@ from PyQt6.QtWidgets import ( from PyQt6.QtGui import QAction from pathlib import Path from backend.validators import PydReagent -# from frontend.functions import ( -# add_kit_function, add_org_function, link_controls_function, export_csv_function -# ) from tools import check_if_app, Settings, Report from .pop_ups import AlertPop from .misc import AddReagentForm, LogParser @@ -149,17 +146,12 @@ class App(QMainWindow): webbrowser.get('windows-default').open(f"file://{url.__str__()}") def result_reporter(self): - # def result_reporter(self, result:TypedDict[]|None=None): """ Report any anomolous results - if any - to the user Args: result (dict | None, optional): The result from a function. Defaults to None. 
""" - # logger.info(f"We got the result: {result}") - # if result != None: - # msg = AlertPop(message=result['message'], status=result['status']) - # msg.exec() logger.debug(f"Running results reporter for: {self.report.results}") if len(self.report.results) > 0: logger.debug(f"We've got some results!") @@ -173,43 +165,6 @@ class App(QMainWindow): else: self.statusBar().showMessage("Action completed sucessfully.", 5000) - # def importSubmission(self, fname:Path|None=None): - # """ - # import submission from excel sheet into form - # """ - # # from .main_window_functions import import_submission_function - # self.raise_() - # self.activateWindow() - # self = import_submission_function(self, fname) - # logger.debug(f"Result from result reporter: {self.report.results}") - # self.result_reporter() - - # def kit_reload(self): - # """ - # Removes all reagents from form before running kit integrity completion. - # """ - # # from .main_window_functions import kit_reload_function - # self = kit_reload_function(self) - # self.result_reporter() - - # def kit_integrity_completion(self): - # """ - # Performs check of imported reagents - # NOTE: this will not change self.reagents which should be fine - # since it's only used when looking up - # """ - # # from .main_window_functions import kit_integrity_completion_function - # self = kit_integrity_completion_function(self) - # self.result_reporter() - - # def submit_new_sample(self): - # """ - # Attempt to add sample to database when 'submit' button clicked - # """ - # # from .main_window_functions import submit_new_sample_function - # self = submit_new_sample_function(self) - # self.result_reporter() - def add_reagent(self, reagent_lot:str|None=None, reagent_type:str|None=None, expiry:date|None=None, name:str|None=None): """ Action to create new reagent in DB. @@ -217,6 +172,8 @@ class App(QMainWindow): Args: reagent_lot (str | None, optional): Parsed reagent from import form. Defaults to None. reagent_type (str | None, optional): Parsed reagent type from import form. Defaults to None. + expiry (date | None, optional): Parsed reagent expiry data. Defaults to None. + name (str | None, optional): Parsed reagent name. Defaults to None. Returns: models.Reagent: the constructed reagent object to add to submission @@ -225,117 +182,20 @@ class App(QMainWindow): if isinstance(reagent_lot, bool): reagent_lot = "" # create form - dlg = AddReagentForm(ctx=self.ctx, reagent_lot=reagent_lot, reagent_type=reagent_type, expiry=expiry, reagent_name=name) + dlg = AddReagentForm(reagent_lot=reagent_lot, reagent_type=reagent_type, expiry=expiry, reagent_name=name) if dlg.exec(): # extract form info - # info = extract_form_info(dlg) info = dlg.parse_form() logger.debug(f"Reagent info: {info}") # create reagent object - # reagent = construct_reagent(ctx=self.ctx, info_dict=info) reagent = PydReagent(ctx=self.ctx, **info) # send reagent to db - # store_reagent(ctx=self.ctx, reagent=reagent) sqlobj, result = reagent.toSQL() sqlobj.save() - # result = store_object(ctx=self.ctx, object=reagent.toSQL()[0]) report.add_result(result) self.result_reporter() return reagent - # def generateReport(self): - # """ - # Action to create a summary of sheet data per client - # """ - # # from .main_window_functions import generate_report_function - # self, result = generate_report_function(self) - # self.result_reporter(result) - - # def add_kit(self): - # """ - # Constructs new kit from yaml and adds to DB. 
- # """ - # # from .main_window_functions import add_kit_function - # self, result = add_kit_function(self) - # self.result_reporter(result) - - # def add_org(self): - # """ - # Constructs new kit from yaml and adds to DB. - # """ - # # from .main_window_functions import add_org_function - # self, result = add_org_function(self) - # self.result_reporter(result) - - # def _controls_getter(self): - # """ - # Lookup controls from database and send to chartmaker - # """ - # # from .main_window_functions import controls_getter_function - # self = controls_getter_function(self) - # self.result_reporter() - - # def _chart_maker(self): - # """ - # Creates plotly charts for webview - # """ - # # from .main_window_functions import chart_maker_function - # self = chart_maker_function(self) - # self.result_reporter() - - # def linkControls(self): - # """ - # Adds controls pulled from irida to relevant submissions - # NOTE: Depreciated due to improvements in controls scraper. - # """ - # # from .main_window_functions import link_controls_function - # self, result = link_controls_function(self) - # self.result_reporter(result) - - # def linkExtractions(self): - # """ - # Links extraction logs from .csv files to relevant submissions. - # """ - # # from .main_window_functions import link_extractions_function - # self, result = link_extractions_function(self) - # self.result_reporter(result) - - # def linkPCR(self): - # """ - # Links PCR logs from .csv files to relevant submissions. - # """ - # # from .main_window_functions import link_pcr_function - # self, result = link_pcr_function(self) - # self.result_reporter(result) - - # def importPCRResults(self): - # """ - # Imports results exported from Design and Analysis .eds files - # """ - # # from .main_window_functions import import_pcr_results_function - # self, result = import_pcr_results_function(self) - # self.result_reporter(result) - - # def construct_first_strand(self): - # """ - # Converts first strand excel sheet to Biomek CSV - # """ - # from .main_window_functions import construct_first_strand_function - # self, result = construct_first_strand_function(self) - # self.result_reporter(result) - - # def scrape_reagents(self, *args, **kwargs): - # # from .main_window_functions import scrape_reagents - # logger.debug(f"Args: {args}") - # logger.debug(F"kwargs: {kwargs}") - # self = scrape_reagents(self, args[0]) - # self.kit_integrity_completion() - # self.result_reporter() - - # def export_csv(self, fname:Path|None=None): - # # from .main_window_functions import export_csv_function - # export_csv_function(self, fname) - def runSearch(self): dlg = LogParser(self) dlg.exec() @@ -377,32 +237,7 @@ class AddSubForm(QWidget): self.tab1.setLayout(self.tab1.layout) self.tab1.layout.addWidget(self.interior) self.tab1.layout.addWidget(self.sheetwidget) - # create widgets for tab 2 - # self.datepicker = ControlsDatePicker() - # self.webengineview = QWebEngineView() - # set tab2 layout self.tab2.layout = QVBoxLayout(self) - # self.control_typer = QComboBox() - # fetch types of controls - # con_types = get_all_Control_Types_names(ctx=parent.ctx) - # con_types = [item.name for item in lookup_control_types(ctx=parent.ctx)] - # con_types = [item.name for item in ControlType.query()] - # self.control_typer.addItems(con_types) - # create custom widget to get types of analysis - # self.mode_typer = QComboBox() - # mode_types = get_all_available_modes(ctx=parent.ctx) - # mode_types = lookup_modes(ctx=parent.ctx) - # mode_types = Control.get_modes() - # 
self.mode_typer.addItems(mode_types) - # create custom widget to get subtypes of analysis - # self.sub_typer = QComboBox() - # self.sub_typer.setEnabled(False) - # add widgets to tab2 layout - # self.tab2.layout.addWidget(self.datepicker) - # self.tab2.layout.addWidget(self.control_typer) - # self.tab2.layout.addWidget(self.mode_typer) - # self.tab2.layout.addWidget(self.sub_typer) - # self.tab2.layout.addWidget(self.webengineview) self.controls_viewer = ControlsViewer(self) self.tab2.layout.addWidget(self.controls_viewer) self.tab2.setLayout(self.tab2.layout) diff --git a/src/submissions/frontend/widgets/controls_chart.py b/src/submissions/frontend/widgets/controls_chart.py index fbba766..540ef5a 100644 --- a/src/submissions/frontend/widgets/controls_chart.py +++ b/src/submissions/frontend/widgets/controls_chart.py @@ -3,10 +3,10 @@ from PyQt6.QtWidgets import ( QWidget, QVBoxLayout, QComboBox, QHBoxLayout, QDateEdit, QLabel, QSizePolicy ) -from PyQt6.QtCore import QSignalBlocker +from PyQt6.QtCore import QSignalBlocker, QLoggingCategory from backend.db import ControlType, Control, get_control_subtypes from PyQt6.QtCore import QDate, QSize -import logging +import logging, sys from tools import Report, Result from backend.excel.reports import convert_data_list_to_df from frontend.visualizations.control_charts import create_charts, construct_html @@ -26,14 +26,10 @@ class ControlsViewer(QWidget): self.layout = QVBoxLayout(self) self.control_typer = QComboBox() # fetch types of controls - # con_types = get_all_Control_Types_names(ctx=parent.ctx) - # con_types = [item.name for item in lookup_control_types(ctx=parent.ctx)] con_types = [item.name for item in ControlType.query()] self.control_typer.addItems(con_types) # create custom widget to get types of analysis self.mode_typer = QComboBox() - # mode_types = get_all_available_modes(ctx=parent.ctx) - # mode_types = lookup_modes(ctx=parent.ctx) mode_types = Control.get_modes() self.mode_typer.addItems(mode_types) # create custom widget to get subtypes of analysis @@ -56,27 +52,17 @@ class ControlsViewer(QWidget): """ Lookup controls from database and send to chartmaker """ - # from .main_window_functions import controls_getter_function self.controls_getter_function() - # self.result_reporter() def chart_maker(self): """ Creates plotly charts for webview """ - # from .main_window_functions import chart_maker_function self.chart_maker_function() - # self.result_reporter() def controls_getter_function(self): """ Get controls based on start/end dates - - Args: - obj (QMainWindow): original app window - - Returns: - Tuple[QMainWindow, dict]: Collection of new main app window and result dict """ report = Report() # subtype defaults to disabled @@ -136,8 +122,6 @@ class ControlsViewer(QWidget): self.subtype = self.sub_typer.currentText() logger.debug(f"Subtype: {self.subtype}") # query all controls using the type/start and end dates from the gui - # controls = get_all_controls_by_type(ctx=obj.ctx, con_type=obj.con_type, start_date=obj.start_date, end_date=obj.end_date) - # controls = lookup_controls(ctx=obj.ctx, control_type=obj.con_type, start_date=obj.start_date, end_date=obj.end_date) controls = Control.query(control_type=self.con_type, start_date=self.start_date, end_date=self.end_date) # if no data found from query set fig to none for reporting in webview if controls == None: @@ -174,7 +158,6 @@ class ControlsDatePicker(QWidget): """ def __init__(self) -> None: super().__init__() - self.start_date = QDateEdit(calendarPopup=True) # start date is 
two months prior to end date by default twomonthsago = QDate.currentDate().addDays(-60) diff --git a/src/submissions/frontend/widgets/functions.py b/src/submissions/frontend/widgets/functions.py index 871efa8..e2b16b0 100644 --- a/src/submissions/frontend/widgets/functions.py +++ b/src/submissions/frontend/widgets/functions.py @@ -20,14 +20,12 @@ def select_open_file(obj:QMainWindow, file_extension:str) -> Path: Path: Path of file to be opened """ try: - # home_dir = Path(obj.ctx.directory_path).resolve().__str__() home_dir = obj.last_dir.resolve().__str__() except FileNotFoundError: home_dir = Path.home().resolve().__str__() except AttributeError: home_dir = obj.app.last_dir.resolve().__str__() fname = Path(QFileDialog.getOpenFileName(obj, 'Open file', home_dir, filter = f"{file_extension}(*.{file_extension})")[0]) - # fname = Path(QFileDialog.getOpenFileName(obj, 'Open file', filter = f"{file_extension}(*.{file_extension})")[0]) obj.last_dir = fname.parent return fname @@ -44,13 +42,11 @@ def select_save_file(obj:QMainWindow, default_name:str, extension:str) -> Path: Path: Path of file to be opened """ try: - # home_dir = Path(obj.ctx.directory_path).joinpath(default_name).resolve().__str__() home_dir = obj.last_dir.joinpath(default_name).resolve().__str__() except FileNotFoundError: home_dir = Path.home().joinpath(default_name).resolve().__str__() except AttributeError: home_dir = obj.app.last_dir.joinpath(default_name).resolve().__str__() fname = Path(QFileDialog.getSaveFileName(obj, "Save File", home_dir, filter = f"{extension}(*.{extension})")[0]) - # fname = Path(QFileDialog.getSaveFileName(obj, "Save File", filter = f"{extension}(*.{extension})")[0]) obj.last_dir = fname.parent return fname \ No newline at end of file diff --git a/src/submissions/frontend/widgets/kit_creator.py b/src/submissions/frontend/widgets/kit_creator.py index 6fa4e3a..f2d60c3 100644 --- a/src/submissions/frontend/widgets/kit_creator.py +++ b/src/submissions/frontend/widgets/kit_creator.py @@ -9,7 +9,7 @@ from backend.db import SubmissionTypeKitTypeAssociation, SubmissionType, Reagent from backend.validators import PydReagentType, PydKit import logging from pprint import pformat -from tools import Report, Result +from tools import Report from typing import Tuple logger = logging.getLogger(f"submissions.{__name__}") @@ -21,7 +21,6 @@ class KitAdder(QWidget): """ def __init__(self, parent) -> None: super().__init__(parent) - # self.ctx = parent_ctx self.report = Report() self.app = parent.parent main_box = QVBoxLayout(self) @@ -30,7 +29,6 @@ class KitAdder(QWidget): scroll.setWidgetResizable(True) scrollContent = QWidget(scroll) self.grid = QGridLayout() - # self.setLayout(self.grid) scrollContent.setLayout(self.grid) # insert submit button at top self.submit_btn = QPushButton("Submit") @@ -45,7 +43,6 @@ class KitAdder(QWidget): used_for = QComboBox() used_for.setObjectName("used_for") # Insert all existing sample types - # used_for.addItems([item.name for item in lookup_submission_type(ctx=parent_ctx)]) used_for.addItems([item.name for item in SubmissionType.query()]) used_for.setEditable(True) self.grid.addWidget(used_for,3,1) @@ -97,7 +94,6 @@ class KitAdder(QWidget): report = Report() # get form info info, reagents = self.parse_form() - # info, reagents = extract_form_info(self) info = {k:v for k,v in info.items() if k in [column.name for column in self.columns] + ['kit_name', 'used_for']} logger.debug(f"kit info: {pformat(info)}") logger.debug(f"kit reagents: {pformat(reagents)}") @@ -115,7 +111,6 @@ class 
KitAdder(QWidget): }} kit.reagent_types.append(PydReagentType(name=reagent['rtname'], eol_ext=reagent['eol'], uses=uses)) logger.debug(f"Output pyd object: {kit.__dict__}") - # result = construct_kit_from_yaml(ctx=self.ctx, kit_dict=info) sqlobj, result = kit.toSQL(self.ctx) report.add_result(result=result) sqlobj.save() @@ -153,10 +148,9 @@ class ReagentTypeForm(QWidget): self.reagent_getter = QComboBox() self.reagent_getter.setObjectName("rtname") # lookup all reagent type names from db - # lookup = lookup_reagent_types(ctx=ctx) lookup = ReagentType.query() logger.debug(f"Looked up ReagentType names: {lookup}") - self.reagent_getter.addItems([item.__str__() for item in lookup]) + self.reagent_getter.addItems([item.name for item in lookup]) self.reagent_getter.setEditable(True) grid.addWidget(self.reagent_getter,0,1) grid.addWidget(QLabel("Extension of Life (months):"),0,2) @@ -221,3 +215,4 @@ class ReagentTypeForm(QWidget): logger.debug(f"Adding key {key}, {sub_key} and value {widget.value()} to {info}") info[key][sub_key] = widget.value() return info + diff --git a/src/submissions/frontend/widgets/misc.py b/src/submissions/frontend/widgets/misc.py index 9ceb2ab..fa40859 100644 --- a/src/submissions/frontend/widgets/misc.py +++ b/src/submissions/frontend/widgets/misc.py @@ -13,7 +13,7 @@ from backend.db.models import * import logging from .pop_ups import AlertPop from .functions import select_open_file -from tools import readInChunks +from tools import readInChunks, Settings logger = logging.getLogger(f"submissions.{__name__}") @@ -23,9 +23,9 @@ class AddReagentForm(QDialog): """ dialog to add gather info about new reagent """ - def __init__(self, ctx:dict, reagent_lot:str|None=None, reagent_type:str|None=None, expiry:date|None=None, reagent_name:str|None=None) -> None: + def __init__(self, reagent_lot:str|None=None, reagent_type:str|None=None, expiry:date|None=None, reagent_name:str|None=None) -> None: super().__init__() - self.ctx = ctx + # self.ctx = ctx if reagent_lot == None: reagent_lot = reagent_type @@ -81,7 +81,13 @@ class AddReagentForm(QDialog): self.setLayout(self.layout) self.type_input.currentTextChanged.connect(self.update_names) - def parse_form(self): + def parse_form(self) -> dict: + """ + Converts information in form to dict. + + Returns: + dict: Output info + """ return dict(name=self.name_input.currentText(), lot=self.lot_input.text(), expiry=self.exp_input.date().toPyDate(), @@ -93,7 +99,6 @@ class AddReagentForm(QDialog): """ logger.debug(self.type_input.currentText()) self.name_input.clear() - # lookup = lookup_reagents(ctx=self.ctx, reagent_type=self.type_input.currentText()) lookup = Reagent.query(reagent_type=self.type_input.currentText()) self.name_input.addItems(list(set([item.name for item in lookup]))) @@ -103,7 +108,6 @@ class ReportDatePicker(QDialog): """ def __init__(self) -> None: super().__init__() - self.setWindowTitle("Select Report Date Range") # make confirm/reject buttons QBtn = QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel @@ -125,7 +129,13 @@ class ReportDatePicker(QDialog): self.layout.addWidget(self.buttonBox) self.setLayout(self.layout) - def parse_form(self): + def parse_form(self) -> dict: + """ + Converts information in this object to a dict + + Returns: + dict: output dict. 
+ """ return dict(start_date=self.start_date.date().toPyDate(), end_date = self.end_date.date().toPyDate()) class FirstStrandSalvage(QDialog): @@ -162,35 +172,6 @@ class FirstStrandSalvage(QDialog): def parse_form(self): return dict(plate=self.rsl_plate_num.text(), submitter_id=self.submitter_id_input.text(), well=f"{self.row_letter.currentText()}{self.column_number.currentText()}") -class FirstStrandPlateList(QDialog): - - def __init__(self, ctx:Settings) -> None: - super().__init__() - self.setWindowTitle("First Strand Plates") - - QBtn = QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel - - self.buttonBox = QDialogButtonBox(QBtn) - self.buttonBox.accepted.connect(self.accept) - self.buttonBox.rejected.connect(self.reject) - # ww = [item.rsl_plate_num for item in lookup_submissions(ctx=ctx, submission_type="Wastewater")] - ww = [item.rsl_plate_num for item in BasicSubmission.query(submission_type="Wastewater")] - self.plate1 = QComboBox() - self.plate2 = QComboBox() - self.plate3 = QComboBox() - self.layout = QFormLayout() - for ii, plate in enumerate([self.plate1, self.plate2, self.plate3]): - plate.addItems(ww) - self.layout.addRow(self.tr(f"&Plate {ii+1}:"), plate) - self.layout.addWidget(self.buttonBox) - self.setLayout(self.layout) - - def parse_form(self): - output = [] - for plate in [self.plate1, self.plate2, self.plate3]: - output.append(plate.currentText()) - return output - class LogParser(QDialog): def __init__(self, parent): diff --git a/src/submissions/frontend/widgets/pop_ups.py b/src/submissions/frontend/widgets/pop_ups.py index 7df5cd0..ac6c7d6 100644 --- a/src/submissions/frontend/widgets/pop_ups.py +++ b/src/submissions/frontend/widgets/pop_ups.py @@ -53,7 +53,6 @@ class KitSelector(QDialog): super().__init__() self.setWindowTitle(title) self.widget = QComboBox() - # kits = [item.__str__() for item in lookup_kit_types(ctx=ctx)] kits = [item.__str__() for item in KitType.query()] self.widget.addItems(kits) self.widget.setEditable(False) diff --git a/src/submissions/frontend/widgets/submission_table.py b/src/submissions/frontend/widgets/submission_table.py index 1acccf4..78b0ae7 100644 --- a/src/submissions/frontend/widgets/submission_table.py +++ b/src/submissions/frontend/widgets/submission_table.py @@ -1,15 +1,15 @@ ''' Contains widgets specific to the submission summary and submission details. 
''' -import base64 +import base64, logging, json from datetime import datetime from io import BytesIO -import pprint +from pprint import pformat from PyQt6 import QtPrintSupport from PyQt6.QtWidgets import ( QVBoxLayout, QDialog, QTableView, QTextEdit, QPushButton, QScrollArea, - QMessageBox, QFileDialog, QMenu, QLabel, + QMessageBox, QMenu, QLabel, QDialogButtonBox, QToolBar ) from PyQt6.QtWebEngineWidgets import QWebEngineView @@ -17,19 +17,16 @@ from PyQt6.QtCore import Qt, QAbstractTableModel, QSortFilterProxyModel from PyQt6.QtGui import QAction, QCursor, QPixmap, QPainter from backend.db.functions import submissions_to_df from backend.db.models import BasicSubmission -from backend.excel import make_hitpicks, make_report_html, make_report_xlsx -from tools import check_if_app, Report, Result -from tools import jinja_template_loading +from backend.excel import make_report_html, make_report_xlsx +from tools import check_if_app, Report, Result, jinja_template_loading, get_first_blank_df_row, row_map from xhtml2pdf import pisa -from pathlib import Path -import logging -from .pop_ups import QuestionAsker, AlertPop +from .pop_ups import QuestionAsker from ..visualizations import make_plate_barcode, make_plate_map, make_plate_map_html from .functions import select_save_file, select_open_file from .misc import ReportDatePicker import pandas as pd +from openpyxl.worksheet.worksheet import Worksheet from getpass import getuser -import json logger = logging.getLogger(f"submissions.{__name__}") @@ -161,16 +158,19 @@ class SubmissionsSheet(QTableView): detailsAction = QAction('Details', self) # barcodeAction = QAction("Print Barcode", self) commentAction = QAction("Add Comment", self) + backupAction = QAction("Backup", self) # hitpickAction = QAction("Hitpicks", self) renameAction.triggered.connect(lambda: self.delete_item(event)) detailsAction.triggered.connect(lambda: self.show_details()) # barcodeAction.triggered.connect(lambda: self.create_barcode()) commentAction.triggered.connect(lambda: self.add_comment()) + backupAction.triggered.connect(lambda: self.regenerate_submission_form()) # hitpickAction.triggered.connect(lambda: self.hit_pick()) self.menu.addAction(detailsAction) self.menu.addAction(renameAction) # self.menu.addAction(barcodeAction) self.menu.addAction(commentAction) + self.menu.addAction(backupAction) # self.menu.addAction(hitpickAction) # add other required actions self.menu.popup(QCursor.pos()) @@ -193,64 +193,64 @@ class SubmissionsSheet(QTableView): return self.setData() - def hit_pick(self): - """ - Extract positive samples from submissions with PCR results and export to csv. 
- NOTE: For this to work for arbitrary samples, positive samples must have 'positive' in their name - """ - # Get all selected rows - indices = self.selectionModel().selectedIndexes() - # convert to id numbers - indices = [index.sibling(index.row(), 0).data() for index in indices] - # biomek can handle 4 plates maximum - if len(indices) > 4: - logger.error(f"Error: Had to truncate number of plates to 4.") - indices = indices[:4] - # lookup ids in the database - # subs = [lookup_submissions(ctx=self.ctx, id=id) for id in indices] - subs = [BasicSubmission.query(id=id) for id in indices] - # full list of samples - dicto = [] - # list to contain plate images - images = [] - for iii, sub in enumerate(subs): - # second check to make sure there aren't too many plates - if iii > 3: - logger.error(f"Error: Had to truncate number of plates to 4.") - continue - plate_dicto = sub.hitpick_plate(plate_number=iii+1) - if plate_dicto == None: - continue - image = make_plate_map(plate_dicto) - images.append(image) - for item in plate_dicto: - if len(dicto) < 94: - dicto.append(item) - else: - logger.error(f"We had to truncate the number of samples to 94.") - logger.debug(f"We found {len(dicto)} to hitpick") - # convert all samples to dataframe - df = make_hitpicks(dicto) - df = df[df.positive != False] - logger.debug(f"Size of the dataframe: {df.shape[0]}") - msg = AlertPop(message=f"We found {df.shape[0]} samples to hitpick", status="INFORMATION") - msg.exec() - if df.size == 0: - return - date = datetime.strftime(datetime.today(), "%Y-%m-%d") - # ask for filename and save as csv. - home_dir = Path(self.ctx.directory_path).joinpath(f"Hitpicks_{date}.csv").resolve().__str__() - fname = Path(QFileDialog.getSaveFileName(self, "Save File", home_dir, filter=".csv")[0]) - if fname.__str__() == ".": - logger.debug("Saving csv was cancelled.") - return - df.to_csv(fname.__str__(), index=False) - # show plate maps - for image in images: - try: - image.show() - except Exception as e: - logger.error(f"Could not show image: {e}.") + # def hit_pick(self): + # """ + # Extract positive samples from submissions with PCR results and export to csv. 
+ # NOTE: For this to work for arbitrary samples, positive samples must have 'positive' in their name + # """ + # # Get all selected rows + # indices = self.selectionModel().selectedIndexes() + # # convert to id numbers + # indices = [index.sibling(index.row(), 0).data() for index in indices] + # # biomek can handle 4 plates maximum + # if len(indices) > 4: + # logger.error(f"Error: Had to truncate number of plates to 4.") + # indices = indices[:4] + # # lookup ids in the database + # # subs = [lookup_submissions(ctx=self.ctx, id=id) for id in indices] + # subs = [BasicSubmission.query(id=id) for id in indices] + # # full list of samples + # dicto = [] + # # list to contain plate images + # images = [] + # for iii, sub in enumerate(subs): + # # second check to make sure there aren't too many plates + # if iii > 3: + # logger.error(f"Error: Had to truncate number of plates to 4.") + # continue + # plate_dicto = sub.hitpick_plate(plate_number=iii+1) + # if plate_dicto == None: + # continue + # image = make_plate_map(plate_dicto) + # images.append(image) + # for item in plate_dicto: + # if len(dicto) < 94: + # dicto.append(item) + # else: + # logger.error(f"We had to truncate the number of samples to 94.") + # logger.debug(f"We found {len(dicto)} to hitpick") + # # convert all samples to dataframe + # df = make_hitpicks(dicto) + # df = df[df.positive != False] + # logger.debug(f"Size of the dataframe: {df.shape[0]}") + # msg = AlertPop(message=f"We found {df.shape[0]} samples to hitpick", status="INFORMATION") + # msg.exec() + # if df.size == 0: + # return + # date = datetime.strftime(datetime.today(), "%Y-%m-%d") + # # ask for filename and save as csv. + # home_dir = Path(self.ctx.directory_path).joinpath(f"Hitpicks_{date}.csv").resolve().__str__() + # fname = Path(QFileDialog.getSaveFileName(self, "Save File", home_dir, filter=".csv")[0]) + # if fname.__str__() == ".": + # logger.debug("Saving csv was cancelled.") + # return + # df.to_csv(fname.__str__(), index=False) + # # show plate maps + # for image in images: + # try: + # image.show() + # except Exception as e: + # logger.error(f"Could not show image: {e}.") def link_extractions(self): self.link_extractions_function() @@ -420,6 +420,7 @@ class SubmissionsSheet(QTableView): subs = BasicSubmission.query(start_date=info['start_date'], end_date=info['end_date']) # convert each object to dict records = [item.report_dict() for item in subs] + logger.debug(f"Records: {pformat(records)}") # make dataframe from record dictionaries detailed_df, summary_df = make_report_xlsx(records=records) html = make_report_html(df=summary_df, start_date=info['start_date'], end_date=info['end_date']) @@ -430,23 +431,42 @@ class SubmissionsSheet(QTableView): writer = pd.ExcelWriter(fname.with_suffix(".xlsx"), engine='openpyxl') summary_df.to_excel(writer, sheet_name="Report") detailed_df.to_excel(writer, sheet_name="Details", index=False) - worksheet = writer.sheets['Report'] - for idx, col in enumerate(summary_df): # loop through all columns + worksheet: Worksheet = writer.sheets['Report'] + for idx, col in enumerate(summary_df, start=1): # loop through all columns series = summary_df[col] max_len = max(( series.astype(str).map(len).max(), # len of largest item len(str(series.name)) # len of column name/header )) + 20 # adding a little extra space try: - worksheet.column_dimensions[get_column_letter(idx)].width = max_len + # worksheet.column_dimensions[get_column_letter(idx=idx)].width = max_len + # Convert idx to letter + col_letter = chr(ord('@') + idx) + 
worksheet.column_dimensions[col_letter].width = max_len except ValueError: pass + blank_row = get_first_blank_df_row(summary_df) + 1 + logger.debug(f"Blank row index = {blank_row}") + for col in range(3,6): + col_letter = row_map[col] + worksheet.cell(row=blank_row, column=col, value=f"=SUM({col_letter}2:{col_letter}{str(blank_row-1)})") for cell in worksheet['D']: if cell.row > 1: cell.style = 'Currency' writer.close() self.report.add_result(report) - + + def regenerate_submission_form(self): + index = (self.selectionModel().currentIndex()) + value = index.sibling(index.row(),0).data() + logger.debug(index) + # msg = QuestionAsker(title="Delete?", message=f"Are you sure you want to delete {index.sibling(index.row(),1).data()}?\n") + # if msg.exec(): + # delete_submission(id=value) + sub = BasicSubmission.query(id=value) + fname = select_save_file(self, default_name=sub.to_pydantic().construct_filename(), extension="xlsx") + sub.backup(fname=fname) + class SubmissionDetails(QDialog): """ a window showing text details of submission @@ -466,7 +486,7 @@ class SubmissionDetails(QDialog): # get submision from db # sub = lookup_submissions(ctx=ctx, id=id) sub = BasicSubmission.query(id=id) - logger.debug(f"Submission details data:\n{pprint.pformat(sub.to_dict())}") + logger.debug(f"Submission details data:\n{pformat(sub.to_dict())}") self.base_dict = sub.to_dict(full_data=True) # don't want id del self.base_dict['id'] @@ -611,8 +631,11 @@ class SubmissionComment(QDialog): super().__init__(parent) # self.ctx = ctx - self.app = parent.parent().parent().parent().parent().parent().parent - print(f"App: {self.app}") + try: + self.app = parent.parent().parent().parent().parent().parent().parent + print(f"App: {self.app}") + except AttributeError: + pass self.rsl = rsl self.setWindowTitle(f"{self.rsl} Submission Comment") # create text field diff --git a/src/submissions/frontend/widgets/submission_widget.py b/src/submissions/frontend/widgets/submission_widget.py index 5159684..1542c45 100644 --- a/src/submissions/frontend/widgets/submission_widget.py +++ b/src/submissions/frontend/widgets/submission_widget.py @@ -65,9 +65,6 @@ class SubmissionFormContainer(QWidget): self.app.result_reporter() def scrape_reagents(self, *args, **kwargs): - # from .main_window_functions import scrape_reagents - # logger.debug(f"Args: {args}") - # logger.debug(F"kwargs: {kwargs}") print(f"\n\n{inspect.stack()[1].function}\n\n") self.scrape_reagents_function(args[0]) self.kit_integrity_completion() @@ -140,7 +137,7 @@ class SubmissionFormContainer(QWidget): return # create sheetparser using excel sheet and context from gui try: - self.prsr = SheetParser(ctx=self.ctx, filepath=fname) + self.prsr = SheetParser(ctx=self.app.ctx, filepath=fname) except PermissionError: logger.error(f"Couldn't get permission to access file: {fname}") return @@ -519,7 +516,7 @@ class SubmissionFormWidget(QWidget): case 'submitting_lab': add_widget = QComboBox() # lookup organizations suitable for submitting_lab (ctx: self.InfoItem.SubmissionFormWidget.SubmissionFormContainer.AddSubForm ) - labs = [item.__str__() for item in Organization.query()] + labs = [item.name for item in Organization.query()] # try to set closest match to top of list try: labs = difflib.get_close_matches(value, labs, len(labs), 0) @@ -536,7 +533,7 @@ class SubmissionFormWidget(QWidget): add_widget = QComboBox() # lookup existing kits by 'submission_type' decided on by sheetparser logger.debug(f"Looking up kits used for {submission_type}") - uses = [item.__str__() for item 
in KitType.query(used_for=submission_type)] + uses = [item.name for item in KitType.query(used_for=submission_type)] obj.uses = uses logger.debug(f"Kits received for {submission_type}: {uses}") if check_not_nan(value): @@ -616,6 +613,8 @@ class ReagentFormWidget(QWidget): def __init__(self, parent:QWidget, reagent:PydReagent, extraction_kit:str): super().__init__(parent) # self.setParent(parent) + self.app = self.parent().parent().parent().parent().parent().parent().parent().parent() + self.reagent = reagent self.extraction_kit = extraction_kit # self.ctx = reagent.ctx @@ -640,7 +639,8 @@ class ReagentFormWidget(QWidget): if wanted_reagent == None: dlg = QuestionAsker(title=f"Add {lot}?", message=f"Couldn't find reagent type {self.reagent.type}: {lot} in the database.\n\nWould you like to add it?") if dlg.exec(): - wanted_reagent = self.parent().parent().parent().parent().parent().parent().parent().parent().parent.add_reagent(reagent_lot=lot, reagent_type=self.reagent.type, expiry=self.reagent.expiry, name=self.reagent.name) + print(self.app) + wanted_reagent = self.app.add_reagent(reagent_lot=lot, reagent_type=self.reagent.type, expiry=self.reagent.expiry, name=self.reagent.name) return wanted_reagent, None else: # In this case we will have an empty reagent and the submission will fail kit integrity check @@ -690,7 +690,7 @@ class ReagentFormWidget(QWidget): # below was lookup_reagent_by_type_name_and_kit_name, but I couldn't get it to work. # lookup = lookup_reagents(ctx=self.ctx, reagent_type=reagent.type) lookup = Reagent.query(reagent_type=reagent.type) - relevant_reagents = [item.__str__() for item in lookup] + relevant_reagents = [str(item.lot) for item in lookup] output_reg = [] for rel_reagent in relevant_reagents: # extract strings from any sets. diff --git a/src/submissions/templates/submission_details.html b/src/submissions/templates/submission_details.html index 3e603b5..40f4515 100644 --- a/src/submissions/templates/submission_details.html +++ b/src/submissions/templates/submission_details.html @@ -35,7 +35,7 @@ Submission Details for {{ sub['Plate Number'] }} - {% set excluded = ['reagents', 'samples', 'controls', 'ext_info', 'pcr_info', 'comments', 'barcode', 'platemap', 'export_map'] %} + {% set excluded = ['reagents', 'samples', 'controls', 'extraction_info', 'pcr_info', 'comment', 'barcode', 'platemap', 'export_map'] %}
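Note (illustrative, outside the patch): the autofill_excel change above falls back to the xlsx template stored on SubmissionType when the submission's original file is temporary or unreadable. A minimal sketch of that fallback, with a hypothetical helper name and the template_file column assumed to hold raw xlsx bytes:

    # Illustrative only; not part of the patch.
    from io import BytesIO
    from openpyxl import load_workbook, Workbook
    from backend.db.models import SubmissionType

    def open_workbook_or_template(filepath, submission_type_name: str) -> Workbook:
        try:
            # Preferred path: re-open the originally submitted workbook.
            return load_workbook(filepath)
        except Exception:
            # Fall back to the blank template stored on the submission type.
            template = SubmissionType.query(name=submission_type_name).template_file
            return load_workbook(BytesIO(template))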

Submission Details for {{ sub['Plate Number'] }}

   {% if sub['barcode'] %}{% endif %}

{% for key, value in sub.items() if key not in excluded %} diff --git a/src/submissions/templates/summary_report.html b/src/submissions/templates/summary_report.html index 2d91cdb..713c468 100644 --- a/src/submissions/templates/summary_report.html +++ b/src/submissions/templates/summary_report.html @@ -12,10 +12,10 @@

{{ lab['lab'] }}:

{% for kit in lab['kits'] %}

{{ kit['name'] }}

- Runs: {{ kit['plate_count'] }}, Samples: {{ kit['sample_count'] }}, Cost: {{ "${:,.2f}".format(kit['cost']) }}
+ Runs: {{ kit['run_count'] }}, Samples: {{ kit['sample_count'] }}, Cost: {{ "${:,.2f}".format(kit['cost']) }}

{% endfor %}

Lab total:

- Runs: {{ lab['total_plates'] }}, Samples: {{ lab['total_samples'] }}, Cost: {{ "${:,.2f}".format(lab['total_cost']) }}
+ Runs: {{ lab['total_runs'] }}, Samples: {{ lab['total_samples'] }}, Cost: {{ "${:,.2f}".format(lab['total_cost']) }}


{% endfor %} diff --git a/src/submissions/tools.py b/src/submissions/tools.py index 37d1e49..971bb2f 100644 --- a/src/submissions/tools.py +++ b/src/submissions/tools.py @@ -13,13 +13,14 @@ import sys, os, stat, platform, getpass import logging from logging import handlers from pathlib import Path -from sqlalchemy.orm import Session, declarative_base, DeclarativeMeta, Query +from sqlalchemy.orm import Query, Session from sqlalchemy import create_engine from pydantic import field_validator, BaseModel, Field from pydantic_settings import BaseSettings, SettingsConfigDict from typing import Any, Tuple, Literal, List import inspect + logger = logging.getLogger(f"submissions.{__name__}") package_dir = Path(__file__).parents[2].resolve() @@ -39,9 +40,6 @@ LOGDIR = main_aux_dir.joinpath("logs") row_map = {1:"A", 2:"B", 3:"C", 4:"D", 5:"E", 6:"F", 7:"G", 8:"H"} -Base: DeclarativeMeta = declarative_base() -metadata = Base.metadata - def check_not_nan(cell_contents) -> bool: """ Check to ensure excel sheet cell contents are not blank. @@ -106,12 +104,12 @@ def check_regex_match(pattern:str, check:str) -> bool: except TypeError: return False -def massage_common_reagents(reagent_name:str): - logger.debug(f"Attempting to massage {reagent_name}") - if reagent_name.endswith("water") or "H2O" in reagent_name.upper(): - reagent_name = "molecular_grade_water" - reagent_name = reagent_name.replace("µ", "u") - return reagent_name +# def massage_common_reagents(reagent_name:str): +# logger.debug(f"Attempting to massage {reagent_name}") +# if reagent_name.endswith("water") or "H2O" in reagent_name.upper(): +# reagent_name = "molecular_grade_water" +# reagent_name = reagent_name.replace("µ", "u") +# return reagent_name class GroupWriteRotatingFileHandler(handlers.RotatingFileHandler): @@ -170,7 +168,7 @@ class Settings(BaseSettings): def set_backup_path(cls, value): if isinstance(value, str): value = Path(value) - metadata.backup_path = value + # metadata.backup_path = value return value @field_validator('directory_path', mode="before") @@ -180,7 +178,7 @@ class Settings(BaseSettings): value = Path(value) if not value.exists(): value = Path().home() - metadata.directory_path = value + # metadata.directory_path = value return value @field_validator('database_path', mode="before") @@ -223,7 +221,7 @@ class Settings(BaseSettings): logger.debug(f"Using {database_path} for database file.") engine = create_engine(f"sqlite:///{database_path}")#, echo=True, future=True) session = Session(engine) - metadata.session = session + # metadata.session = session return session @field_validator('package', mode="before") @@ -239,6 +237,7 @@ def get_config(settings_path: Path|str|None=None) -> Settings: Args: settings_path (Path | str | None, optional): Path to config.yml Defaults to None. + override (dict | None, optional): dictionary of settings to be used instead of file. Defaults to None. 
Returns: Settings: Pydantic settings object @@ -273,7 +272,6 @@ def get_config(settings_path: Path|str|None=None) -> Settings: settings_path = Path.home().joinpath(".submissions", "config.yml") # finally look in the local config else: - # if getattr(sys, 'frozen', False): if check_if_app(): settings_path = Path(sys._MEIPASS).joinpath("files", "config.yml") else: @@ -281,7 +279,6 @@ def get_config(settings_path: Path|str|None=None) -> Settings: with open(settings_path, "r") as dset: default_settings = yaml.load(dset, Loader=yaml.Loader) # Tell program we need to copy the config.yml to the user directory - # copy_settings_trigger = True # copy settings to config directory return Settings(**copy_settings(settings_path=CONFIGDIR.joinpath("config.yml"), settings=default_settings)) else: @@ -390,6 +387,12 @@ def jinja_template_loading(): return env def check_authorization(func): + """ + Decorator to check if user is authorized to access function + + Args: + func (_type_): Function to be used. + """ def wrapper(*args, **kwargs): logger.debug(f"Checking authorization") if getpass.getuser() in kwargs['ctx'].power_users: @@ -399,7 +402,7 @@ def check_authorization(func): return dict(code=1, message="This user does not have permission for this function.", status="warning") return wrapper -def check_if_app(ctx:Settings=None) -> bool: +def check_if_app() -> bool: """ Checks if the program is running from pyinstaller compiled @@ -484,15 +487,6 @@ class Report(BaseModel): results: List[Result] = Field(default=[]) - # def __init__(self, *args, **kwargs): - # if 'msg' in kwargs.keys(): - # res = Result(msg=kwargs['msg']) - # for k,v in kwargs.items(): - # if k in ['code', 'status']: - # setattr(res, k, v) - # self.results.append(res) - - def __repr__(self): return f"Report(result_count:{len(self.results)})" @@ -523,3 +517,8 @@ def readInChunks(fileObj, chunkSize=2048): if not data: break yield data + +def get_first_blank_df_row(df:pd.DataFrame) -> int: + return len(df) + 1 + +ctx = get_config(None)
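Note (illustrative, outside the patch): the report-writing hunk in submission_table.py uses get_first_blank_df_row together with a column-letter lookup to append SUM formulas under the summary table. A standalone sketch of that step, with a hypothetical helper name; openpyxl's get_column_letter stands in for the row_map lookup, which gives the same letters for columns A through H:

    # Illustrative only; not part of the patch.
    from openpyxl.utils import get_column_letter
    from openpyxl.worksheet.worksheet import Worksheet
    import pandas as pd

    from tools import get_first_blank_df_row

    def append_column_sums(worksheet: Worksheet, summary_df: pd.DataFrame,
                           first_col: int = 3, last_col: int = 5) -> None:
        # Data starts at row 2 (row 1 holds the header), so the first blank
        # row sits one past the frame length plus the header offset.
        blank_row = get_first_blank_df_row(summary_df) + 1
        for col in range(first_col, last_col + 1):
            col_letter = get_column_letter(col)
            worksheet.cell(row=blank_row, column=col,
                           value=f"=SUM({col_letter}2:{col_letter}{blank_row - 1})")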