Removed logger.debug calls from proven functions.

lwark
2024-12-12 12:17:21 -06:00
parent b174eb1221
commit 67520cb784
32 changed files with 80 additions and 758 deletions

View File

@@ -18,12 +18,8 @@ def set_sqlite_pragma(dbapi_connection, connection_record):
connection_record (_type_): _description_
"""
cursor = dbapi_connection.cursor()
# print(ctx.database_schema)
if ctx.database_schema == "sqlite":
execution_phrase = "PRAGMA foreign_keys=ON"
# cursor.execute(execution_phrase)
# elif ctx.database_schema == "mssql+pyodbc":
# execution_phrase = "SET IDENTITY_INSERT dbo._wastewater ON;"
else:
print("Nothing to execute, returning")
cursor.close()
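This hunk prunes dead branches from the pragma hook (note the `cursor.execute` call itself is still commented out in the source). For reference, the canonical SQLAlchemy recipe this follows is a per-connection event listener; a minimal sketch, assuming a plain SQLite engine:

```python
from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///example.db")

@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    # SQLite disables foreign-key enforcement per connection by default,
    # so the pragma must be issued on every new DBAPI connection.
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()
```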
@@ -37,12 +33,9 @@ from .models import *
def update_log(mapper, connection, target):
# logger.debug("\n\nBefore update\n\n")
state = inspect(target)
# logger.debug(state)
object_name = state.object.truncated_name()
update = dict(user=getuser(), time=datetime.now(), object=object_name, changes=[])
# logger.debug(update)
for attr in state.attrs:
hist = attr.load_history()
if not hist.has_changes():
@@ -56,24 +49,19 @@ def update_log(mapper, connection, target):
continue
deleted = [str(item) for item in hist.deleted]
change = dict(field=attr.key, added=added, deleted=deleted)
# logger.debug(f"Adding: {pformat(change)}")
if added != deleted:
try:
update['changes'].append(change)
except Exception as e:
logger.error(f"Something went wrong adding attr: {attr.key}: {e}")
continue
# logger.debug(f"Adding to audit logs: {pformat(update)}")
if update['changes']:
# Note: must use execute as the session will be busy at this point.
# https://medium.com/@singh.surbhicse/creating-audit-table-to-log-insert-update-and-delete-changes-in-flask-sqlalchemy-f2ca53f7b02f
table = AuditLog.__table__
# logger.debug(f"Adding to {table}")
connection.execute(table.insert().values(**update))
# logger.debug("Here is where I would insert values, if I was able.")
else:
logger.info(f"No changes detected, not updating logs.")
# if ctx.logging_enabled:
event.listen(LogMixin, 'after_update', update_log, propagate=True)
event.listen(LogMixin, 'after_insert', update_log, propagate=True)
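The surviving logic in this hook is easier to see without the deleted noise: it diffs each attribute's load history and writes one audit row per changed object, going through `connection.execute` because the session is mid-flush. A condensed sketch reassembled from the hunks above (`AuditLog` and `LogMixin` are this repo's own models, imported via `from .models import *`):

```python
from datetime import datetime
from getpass import getuser
from sqlalchemy import event, inspect

def update_log(mapper, connection, target):
    state = inspect(target)
    update = dict(user=getuser(), time=datetime.now(),
                  object=state.object.truncated_name(), changes=[])
    for attr in state.attrs:
        hist = attr.load_history()
        if not hist.has_changes():
            continue
        added = [str(item) for item in hist.added]
        deleted = [str(item) for item in hist.deleted]
        if added != deleted:
            update['changes'].append(dict(field=attr.key, added=added, deleted=deleted))
    if update['changes']:
        # NOTE: must use execute as the session will be busy at this point.
        connection.execute(AuditLog.__table__.insert().values(**update))

event.listen(LogMixin, 'after_update', update_log, propagate=True)
event.listen(LogMixin, 'after_insert', update_log, propagate=True)
```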

View File

@@ -3,7 +3,6 @@ Contains all models for sqlalchemy
"""
from __future__ import annotations
import sys, logging
from pandas import DataFrame
from sqlalchemy import Column, INTEGER, String, JSON
from sqlalchemy.orm import DeclarativeMeta, declarative_base, Query, Session
@@ -131,7 +130,6 @@ class BaseClass(Base):
search = name.title().replace(" ", "")
else:
search = name
logger.debug(f"Searching for subclass: {search}")
return next((item for item in cls.__subclasses__() if item.__name__ == search), cls)
@classmethod
@@ -146,9 +144,7 @@ class BaseClass(Base):
List[Any]: Results of sqlalchemy query.
"""
query: Query = cls.__database_session__.query(cls)
# logger.debug(f"Queried model. Now running searches in {kwargs}")
for k, v in kwargs.items():
# logger.debug(f"Running fuzzy search for attribute: {k} with value {v}")
# NOTE: Not sure why this is necessary, but it is.
search = f"%{v}%"
try:
@@ -200,9 +196,7 @@ class BaseClass(Base):
model = cls
if query is None:
query: Query = cls.__database_session__.query(model)
# logger.debug(f"Grabbing singles using {model.get_default_info}")
singles = model.get_default_info('singles')
# logger.info(f"Querying: {model}, with kwargs: {kwargs}")
for k, v in kwargs.items():
logger.info(f"Using key: {k} with value: {v}")
try:
@@ -227,7 +221,6 @@ class BaseClass(Base):
"""
Add the object to the database and commit
"""
# logger.debug(f"Saving object: {pformat(self.__dict__)}")
report = Report()
try:
self.__database_session__.add(self)
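For reference, the fuzzy-search loop whose debug lines are trimmed above builds substring LIKE filters from arbitrary keyword arguments; the %-wrapping is what makes LIKE match the value anywhere in the column. A minimal sketch, assuming a SQLAlchemy session and a model with String columns (note SQLite's LIKE is already case-insensitive for ASCII):

```python
def fuzzy_search(session, model, **kwargs):
    query = session.query(model)
    for field, value in kwargs.items():
        # NOTE: "%value%" makes LIKE match the value anywhere in the column.
        query = query.filter(getattr(model, field).like(f"%{value}%"))
    return query.all()

# fuzzy_search(session, KitType, name="bact") would match "Bacterial Culture Kit".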

View File

@@ -2,7 +2,6 @@
Contains the audit log class and functions.
"""
from typing import List
from dateutil.parser import parse
from sqlalchemy.orm import declarative_base, DeclarativeMeta, Query
from . import BaseClass
@@ -48,32 +47,24 @@ class AuditLog(Base):
logger.warning(f"End date with no start date, using Jan 1, 2023")
start_date = session.query(cls, func.min(cls.time)).first()[1]
if start_date is not None:
# logger.debug(f"Querying with start date: {start_date} and end date: {end_date}")
match start_date:
case date():
# logger.debug(f"Lookup BasicSubmission by start_date({start_date})")
start_date = start_date.strftime("%Y-%m-%d")
case int():
# logger.debug(f"Lookup BasicSubmission by ordinal start_date {start_date}")
start_date = datetime.fromordinal(
datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d")
case _:
# logger.debug(f"Lookup BasicSubmission by parsed str start_date {start_date}")
start_date = parse(start_date).strftime("%Y-%m-%d")
match end_date:
case date() | datetime():
# logger.debug(f"Lookup BasicSubmission by end_date({end_date})")
end_date = end_date + timedelta(days=1)
end_date = end_date.strftime("%Y-%m-%d")
case int():
# logger.debug(f"Lookup BasicSubmission by ordinal end_date {end_date}")
end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date() + timedelta(days=1)
end_date = end_date.strftime("%Y-%m-%d")
case _:
# logger.debug(f"Lookup BasicSubmission by parsed str end_date {end_date}")
end_date = parse(end_date) + timedelta(days=1)
end_date = end_date.strftime("%Y-%m-%d")
# logger.debug(f"Compensating for same date by using time")
if start_date == end_date:
start_date = datetime.strptime(start_date, "%Y-%m-%d").strftime("%Y-%m-%d %H:%M:%S.%f")
query = query.filter(cls.time == start_date)
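Both `int()` arms above convert Excel serial dates; the `- 2` compensates for Excel serials being 1-based at 1900-01-01 *and* for Excel's phantom 1900-02-29, which together shift every modern serial by two. A quick check of the conversion used above:

```python
from datetime import datetime

def excel_serial_to_date(serial: int):
    # Same expression as in the hunks above.
    return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + serial - 2).date()

print(excel_serial_to_date(61))     # 1900-03-01, the first serial after the bogus leap day
print(excel_serial_to_date(45000))  # 2023-03-15
```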

View File

@@ -171,11 +171,9 @@ class Control(BaseClass):
match submission_type:
case str():
from backend import BasicSubmission, SubmissionType
# logger.debug(f"Lookup controls by SubmissionType str: {submission_type}")
query = query.join(BasicSubmission).join(SubmissionType).filter(SubmissionType.name == submission_type)
case SubmissionType():
from backend import BasicSubmission
# logger.debug(f"Lookup controls by SubmissionType: {submission_type}")
query = query.join(BasicSubmission).filter(BasicSubmission.submission_type_name == submission_type.name)
case _:
pass
@@ -203,31 +201,23 @@ class Control(BaseClass):
if start_date is not None:
match start_date:
case date():
# logger.debug(f"Lookup control by start date({start_date})")
start_date = start_date.strftime("%Y-%m-%d")
case int():
# logger.debug(f"Lookup control by ordinal start date {start_date}")
start_date = datetime.fromordinal(
datetime(1900, 1, 1).toordinal() + start_date - 2).date().strftime("%Y-%m-%d")
case _:
# logger.debug(f"Lookup control with parsed start date {start_date}")
start_date = parse(start_date).strftime("%Y-%m-%d")
match end_date:
case date():
# logger.debug(f"Lookup control by end date({end_date})")
end_date = end_date.strftime("%Y-%m-%d")
case int():
# logger.debug(f"Lookup control by ordinal end date {end_date}")
end_date = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + end_date - 2).date().strftime(
"%Y-%m-%d")
case _:
# logger.debug(f"Lookup control with parsed end date {end_date}")
end_date = parse(end_date).strftime("%Y-%m-%d")
# logger.debug(f"Looking up BasicSubmissions from start date: {start_date} and end date: {end_date}")
query = query.filter(cls.submitted_date.between(start_date, end_date))
match name:
case str():
# logger.debug(f"Lookup control by name {control_name}")
query = query.filter(cls.name.startswith(name))
limit = 1
case _:
@@ -273,7 +263,6 @@ class Control(BaseClass):
except StopIteration as e:
raise AttributeError(
f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs.keys())}")
# logger.info(f"Recruiting model: {model}")
return model
@classmethod
@@ -343,7 +332,6 @@ class PCRControl(Control):
parent.mode_typer.clear()
parent.mode_typer.setEnabled(False)
report = Report()
# logger.debug(f"Chart settings: {pformat(chart_settings)}")
controls = cls.query(submission_type=chart_settings['sub_type'], start_date=chart_settings['start_date'],
end_date=chart_settings['end_date'])
data = [control.to_sub_dict() for control in controls]
@@ -411,21 +399,16 @@ class IridaControl(Control):
kraken = self.kraken
except TypeError:
kraken = {}
# logger.debug("calculating kraken count total to use in percentage")
kraken_cnt_total = sum([kraken[item]['kraken_count'] for item in kraken])
# logger.debug("Creating new kraken.")
new_kraken = [dict(name=item, kraken_count=kraken[item]['kraken_count'],
kraken_percent="{0:.0%}".format(kraken[item]['kraken_count'] / kraken_cnt_total),
target=item in self.controltype.targets)
for item in kraken]
# logger.debug(f"New kraken before sort: {new_kraken}")
new_kraken = sorted(new_kraken, key=itemgetter('kraken_count'), reverse=True)
# logger.debug("setting targets")
if self.controltype.targets:
targets = self.controltype.targets
else:
targets = ["None"]
# logger.debug("constructing output dictionary")
output = dict(
name=self.name,
type=self.controltype.name,
@@ -447,7 +430,6 @@ class IridaControl(Control):
Returns:
List[dict]: list of records
"""
# logger.debug("load json string for mode (i.e. contains, matches, kraken2)")
try:
data = self.__getattribute__(mode)
except TypeError:
@@ -460,12 +442,10 @@ class IridaControl(Control):
else:
if consolidate:
on_tar = {k: v for k, v in data.items() if k.strip("*") in self.controltype.targets[control_sub_type]}
# logger.debug(f"Consolidating off-targets to: {self.controltype.targets[control_sub_type]}")
off_tar = sum(v[f'{mode}_ratio'] for k, v in data.items() if
k.strip("*") not in self.controltype.targets[control_sub_type])
on_tar['Off-target'] = {f"{mode}_ratio": off_tar}
data = on_tar
# logger.debug("dict keys are genera of bacteria, e.g. 'Streptococcus'")
for genus in data:
_dict = dict(
name=self.name,
@@ -473,7 +453,6 @@ class IridaControl(Control):
genus=genus,
target='Target' if genus.strip("*") in self.controltype.targets[control_sub_type] else "Off-target"
)
# logger.debug("get Target or Off-target of genus")
for key in data[genus]:
_dict[key] = data[genus][key]
yield _dict
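The hunks above trim debug logging around building per-genus rows from the kraken JSON; the arithmetic that survives is a straight percentage over the count total, sorted descending. A minimal sketch with fabricated counts:

```python
# Illustrative input: {genus: {"kraken_count": int}} plus a set of target genera.
kraken = {"Streptococcus": {"kraken_count": 80}, "Escherichia": {"kraken_count": 20}}
targets = {"Streptococcus"}

total = sum(v["kraken_count"] for v in kraken.values())
rows = sorted(
    (dict(name=genus,
          kraken_count=v["kraken_count"],
          kraken_percent="{0:.0%}".format(v["kraken_count"] / total),
          target=genus in targets)
     for genus, v in kraken.items()),
    key=lambda r: r["kraken_count"], reverse=True)
# rows[0] -> {'name': 'Streptococcus', 'kraken_count': 80, 'kraken_percent': '80%', 'target': True}
```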
@@ -487,7 +466,6 @@ class IridaControl(Control):
List[str]: List of control mode names.
"""
try:
# logger.debug("Creating a list of JSON columns in _controls table")
cols = [item.name for item in list(cls.__table__.columns) if isinstance(item.type, JSON)]
except AttributeError as e:
logger.error(f"Failed to get available modes from db: {e}")
@@ -504,7 +482,6 @@ class IridaControl(Control):
"""
super().make_parent_buttons(parent=parent)
rows = parent.layout.rowCount() - 2
# logger.debug(f"Parent rows: {rows}")
checker = QCheckBox(parent)
checker.setChecked(True)
checker.setObjectName("irida_check")
@@ -539,10 +516,8 @@ class IridaControl(Control):
except AttributeError:
consolidate = False
report = Report()
# logger.debug(f"settings: {pformat(chart_settings)}")
controls = cls.query(subtype=chart_settings['sub_type'], start_date=chart_settings['start_date'],
end_date=chart_settings['end_date'])
# logger.debug(f"Controls found: {controls}")
if not controls:
report.add_result(Result(status="Critical", msg="No controls found in given date range."))
return report, None
@@ -552,19 +527,16 @@ class IridaControl(Control):
control in controls]
# NOTE: flatten data to one dimensional list
data = [item for sublist in data for item in sublist]
# logger.debug(f"Control objects going into df conversion: {pformat(data)}")
if not data:
report.add_result(Result(status="Critical", msg="No data found for controls in given date range."))
return report, None
df = cls.convert_data_list_to_df(input_df=data, sub_mode=chart_settings['sub_mode'])
# logger.debug(f"Chart df: \n {df}")
if chart_settings['sub_mode'] is None:
title = chart_settings['sub_mode']
else:
title = f"{chart_settings['mode']} - {chart_settings['sub_mode']}"
# NOTE: send dataframe to chart maker
df, modes = cls.prep_df(ctx=ctx, df=df)
# logger.debug(f"prepped df: \n {df}")
fig = IridaFigure(df=df, ytitle=title, modes=modes, parent=parent,
settings=chart_settings)
return report, fig
@@ -581,9 +553,7 @@ class IridaControl(Control):
Returns:
DataFrame: dataframe of controls
"""
# logger.debug(f"Subtype: {sub_mode}")
df = DataFrame.from_records(input_df)
# logger.debug(f"DF from records: {df}")
safe = ['name', 'submitted_date', 'genus', 'target']
for column in df.columns:
if column not in safe:
@@ -636,7 +606,6 @@ class IridaControl(Control):
Returns:
DataFrame: output dataframe with dates incremented.
"""
# logger.debug(f"Unique items: {df['name'].unique()}")
# NOTE: get submitted dates for each control
dict_list = [dict(name=item, date=df[df.name == item].iloc[0]['submitted_date']) for item in
sorted(df['name'].unique())]
@@ -664,7 +633,6 @@ class IridaControl(Control):
check = False
previous_dates.add(item['date'])
if check:
# logger.debug(f"We found one! Increment date!\n\t{item['date']} to {item['date'] + timedelta(days=1)}")
# NOTE: get df locations where name == item name
mask = df['name'] == item['name']
# NOTE: increment date in dataframe
@@ -673,15 +641,12 @@ class IridaControl(Control):
passed = False
else:
passed = True
# logger.debug(f"\n\tCurrent date: {item['date']}\n\tPrevious dates:{previous_dates}")
# logger.debug(f"DF: {type(df)}, previous_dates: {type(previous_dates)}")
# NOTE: if run didn't lead to changed date, return values
if passed:
# logger.debug(f"Date check passed, returning.")
return df, previous_dates
# NOTE: if date was changed, rerun with new date
else:
# logger.warning(f"Date check failed, running recursion")
logger.warning(f"Date check failed, running recursion")
df, previous_dates = cls.check_date(df, item, previous_dates)
return df, previous_dates
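`check_date` above recurses until every control's submitted date is unique, nudging collisions forward a day so chart points don't stack on the x-axis. An equivalent iterative sketch with pandas, using fabricated rows:

```python
from datetime import timedelta
import pandas as pd

df = pd.DataFrame(dict(name=["A", "B", "C"],
                       submitted_date=pd.to_datetime(["2024-01-01"] * 3)))

seen = set()
for name in sorted(df["name"].unique()):
    d = df.loc[df["name"] == name, "submitted_date"].iloc[0]
    while d in seen:           # keep nudging until this control's date is unique
        d += timedelta(days=1)
    seen.add(d)
    df.loc[df["name"] == name, "submitted_date"] = d

print(df)  # B lands on Jan 2, C on Jan 3
```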
@@ -708,13 +673,10 @@ class IridaControl(Control):
# NOTE: sort by and exclude from
sorts = ['submitted_date', "target", "genus"]
exclude = ['name', 'genera']
# logger.debug(df.columns)
modes = [item for item in df.columns if item not in sorts and item not in exclude]
# logger.debug(f"Modes coming out: {modes}")
# NOTE: Set descending for any columns that have "{mode}" in the header.
ascending = [False if item == "target" else True for item in sorts]
df = df.sort_values(by=sorts, ascending=ascending)
# logger.debug(df[df.isna().any(axis=1)])
# NOTE: actual chart construction is done by
return df, modes

View File

@@ -17,7 +17,6 @@ from io import BytesIO
logger = logging.getLogger(f'submissions.{__name__}')
# logger.debug("Table for ReagentType/Reagent relations")
reagentroles_reagents = Table(
"_reagentroles_reagents",
Base.metadata,
@@ -26,7 +25,6 @@ reagentroles_reagents = Table(
extend_existing=True
)
# logger.debug("Table for EquipmentRole/Equipment relations")
equipmentroles_equipment = Table(
"_equipmentroles_equipment",
Base.metadata,
@@ -35,7 +33,6 @@ equipmentroles_equipment = Table(
extend_existing=True
)
# logger.debug("Table for Equipment/Process relations")
equipment_processes = Table(
"_equipment_processes",
Base.metadata,
@@ -44,7 +41,6 @@ equipment_processes = Table(
extend_existing=True
)
# logger.debug("Table for EquipmentRole/Process relations")
equipmentroles_processes = Table(
"_equipmentroles_processes",
Base.metadata,
@@ -53,7 +49,6 @@ equipmentroles_processes = Table(
extend_existing=True
)
# logger.debug("Table for SubmissionType/Process relations")
submissiontypes_processes = Table(
"_submissiontypes_processes",
Base.metadata,
@@ -62,7 +57,6 @@ submissiontypes_processes = Table(
extend_existing=True
)
# logger.debug("Table for KitType/Process relations")
kittypes_processes = Table(
"_kittypes_processes",
Base.metadata,
@@ -71,7 +65,6 @@ kittypes_processes = Table(
extend_existing=True
)
# logger.debug("Table for TipRole/Tips relations")
tiproles_tips = Table(
"_tiproles_tips",
Base.metadata,
@@ -80,7 +73,6 @@ tiproles_tips = Table(
extend_existing=True
)
# logger.debug("Table for Process/TipRole relations")
process_tiprole = Table(
"_process_tiprole",
Base.metadata,
@@ -89,7 +81,6 @@ process_tiprole = Table(
extend_existing=True
)
# logger.debug("Table for Equipment/Tips relations")
equipment_tips = Table(
"_equipment_tips",
Base.metadata,
@@ -116,7 +107,7 @@ class KitType(BaseClass):
cascade="all, delete-orphan",
)
# creator function: https://stackoverflow.com/questions/11091491/keyerror-when-adding-objects-to-sqlalchemy-association-object/11116291#11116291
# NOTE: creator function: https://stackoverflow.com/questions/11091491/keyerror-when-adding-objects-to-sqlalchemy-association-object/11116291#11116291
reagent_roles = association_proxy("kit_reagentrole_associations", "reagent_role",
creator=lambda RT: KitTypeReagentRoleAssociation(
reagent_role=RT)) #: Association proxy to KitTypeReagentRoleAssociation
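The NOTE added above points at the association-proxy creator pattern from the linked Stack Overflow answer: appending a bare ReagentRole must transparently create the association row. A self-contained sketch with simplified, illustrative names:

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class KitType(Base):
    __tablename__ = "kit_type"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    kit_reagentrole_associations = relationship("Assoc", back_populates="kit_type")
    # creator tells the proxy how to wrap a bare ReagentRole in an Assoc row.
    reagent_roles = association_proxy(
        "kit_reagentrole_associations", "reagent_role",
        creator=lambda role: Assoc(reagent_role=role))

class ReagentRole(Base):
    __tablename__ = "reagent_role"
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Assoc(Base):
    __tablename__ = "assoc"
    kit_id = Column(Integer, ForeignKey("kit_type.id"), primary_key=True)
    role_id = Column(Integer, ForeignKey("reagent_role.id"), primary_key=True)
    kit_type = relationship(KitType, back_populates="kit_reagentrole_associations")
    reagent_role = relationship(ReagentRole)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    kit = KitType(name="demo")
    kit.reagent_roles.append(ReagentRole(name="lysis buffer"))  # creator fires here
    session.add(kit)
    session.commit()
```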
@@ -152,18 +143,14 @@ class KitType(BaseClass):
"""
match submission_type:
case SubmissionType():
# logger.debug(f"Getting reagents by SubmissionType {submission_type}")
relevant_associations = [item for item in self.kit_reagentrole_associations if
item.submission_type == submission_type]
case str():
# logger.debug(f"Getting reagents by str {submission_type}")
relevant_associations = [item for item in self.kit_reagentrole_associations if
item.submission_type.name == submission_type]
case _:
# logger.debug(f"Getting reagents")
relevant_associations = [item for item in self.kit_reagentrole_associations]
if required:
# logger.debug(f"Filtering by required.")
return (item.reagent_role for item in relevant_associations if item.required == 1)
else:
return (item.reagent_role for item in relevant_associations)
@@ -181,18 +168,14 @@ class KitType(BaseClass):
# NOTE: Account for submission_type variable type.
match submission_type:
case str():
# logger.debug(f"Constructing xl map with str {submission_type}")
assocs = [item for item in self.kit_reagentrole_associations if
item.submission_type.name == submission_type]
case SubmissionType():
# logger.debug(f"Constructing xl map with SubmissionType {submission_type}")
assocs = [item for item in self.kit_reagentrole_associations if item.submission_type == submission_type]
case _:
raise ValueError(f"Wrong variable type: {type(submission_type)} used!")
# logger.debug("Get all KitTypeReagentTypeAssociation for SubmissionType")
for assoc in assocs:
try:
# logger.debug(f"Yielding: {assoc.reagent_role.name}, {assoc.uses}")
yield assoc.reagent_role.name, assoc.uses
except TypeError:
continue
@@ -220,27 +203,22 @@ class KitType(BaseClass):
query: Query = cls.__database_session__.query(cls)
match used_for:
case str():
# logger.debug(f"Looking up kit type by used_for str: {used_for}")
query = query.filter(cls.used_for.any(name=used_for))
case SubmissionType():
# logger.debug(f"Looking up kit type by used_for SubmissionType: {used_for}")
query = query.filter(cls.used_for.contains(used_for))
case _:
pass
match name:
case str():
# logger.debug(f"Looking up kit type by name str: {name}")
query = query.filter(cls.name == name)
limit = 1
case _:
pass
match id:
case int():
# logger.debug(f"Looking up kit type by id int: {id}")
query = query.filter(cls.id == id)
limit = 1
case str():
# logger.debug(f"Looking up kit type by id str: {id}")
query = query.filter(cls.id == int(id))
limit = 1
case _:
@@ -262,10 +240,7 @@ class KitType(BaseClass):
dict: Dictionary containing relevant info for SubmissionType construction
"""
base_dict = dict(name=self.name, reagent_roles=[], equipment_roles=[])
# base_dict['reagent roles'] = []
# base_dict['equipment roles'] = []
for k, v in self.construct_xl_map_for_use(submission_type=submission_type):
# logger.debug(f"Value: {v}")
try:
assoc = next(item for item in self.kit_reagentrole_associations if item.reagent_role.name == k)
except StopIteration as e:
@@ -280,10 +255,8 @@ class KitType(BaseClass):
except StopIteration:
continue
for kk, vv in assoc.to_export_dict(extraction_kit=self).items():
# logger.debug(f"{kk}:{vv}")
v[kk] = vv
base_dict['equipment_roles'].append(v)
# logger.debug(f"KT returning {base_dict}")
return base_dict
@@ -347,28 +320,19 @@ class ReagentRole(BaseClass):
else:
match kit_type:
case str():
# logger.debug(f"Lookup ReagentType by kittype str {kit_type}")
kit_type = KitType.query(name=kit_type)
case _:
pass
match reagent:
case str():
# logger.debug(f"Lookup ReagentType by reagent str {reagent}")
reagent = Reagent.query(lot=reagent)
case _:
pass
assert reagent.role
# logger.debug(f"Looking up reagent type for {type(kit_type)} {kit_type} and {type(reagent)} {reagent}")
# logger.debug(f"Kit reagent types: {kit_type.reagent_types}")
result = set(kit_type.reagent_roles).intersection(reagent.role)
# logger.debug(f"Result: {result}")
# try:
return next((item for item in result), None)
# except IndexError:
# return None
match name:
case str():
# logger.debug(f"Looking up reagent type by name str: {name}")
query = query.filter(cls.name == name)
limit = 1
case _:
@@ -457,7 +421,6 @@ class Reagent(BaseClass, LogMixin):
rtype = reagent_role.name.replace("_", " ")
except AttributeError:
rtype = "Unknown"
# logger.debug(f"Role for {self.name}: {rtype}")
# NOTE: Calculate expiry with EOL from ReagentType
try:
place_holder = self.expiry + reagent_role.eol_ext
@@ -493,14 +456,11 @@ class Reagent(BaseClass, LogMixin):
Report: Result of operation
"""
report = Report()
# logger.debug(f"Attempting update of last used reagent type at intersection of ({self}), ({kit})")
rt = ReagentRole.query(kit_type=kit, reagent=self, limit=1)
if rt is not None:
# logger.debug(f"got reagenttype {rt}")
assoc = KitTypeReagentRoleAssociation.query(kit_type=kit, reagent_role=rt)
if assoc is not None:
if assoc.last_used != self.lot:
# logger.debug(f"Updating {assoc} last used to {self.lot}")
assoc.last_used = self.lot
result = assoc.save()
report.add_result(result)
@@ -539,23 +499,19 @@ class Reagent(BaseClass, LogMixin):
pass
match role:
case str():
# logger.debug(f"Looking up reagents by reagent type str: {reagent_type}")
query = query.join(cls.role).filter(ReagentRole.name == role)
case ReagentRole():
# logger.debug(f"Looking up reagents by reagent type ReagentType: {reagent_type}")
query = query.filter(cls.role.contains(role))
case _:
pass
match name:
case str():
# logger.debug(f"Looking up reagent by name str: {name}")
# NOTE: Not limited due to multiple reagents having same name.
query = query.filter(cls.name == name)
case _:
pass
match lot:
case str():
# logger.debug(f"Looking up reagent by lot number str: {lot}")
query = query.filter(cls.lot == lot)
# NOTE: In this case limit number returned.
limit = 1
@@ -579,7 +535,6 @@ class Reagent(BaseClass, LogMixin):
case "expiry":
if isinstance(value, str):
field_value = datetime.strptime(value, "%Y-%m-%d")
# field_value.replace(tzinfo=timezone)
elif isinstance(value, date):
field_value = datetime.combine(value, datetime.min.time())
else:
@@ -589,7 +544,6 @@ class Reagent(BaseClass, LogMixin):
continue
case _:
field_value = value
# logger.debug(f"Setting reagent {key} to {field_value}")
self.__setattr__(key, field_value)
self.save()
@@ -634,25 +588,19 @@ class Discount(BaseClass):
query: Query = cls.__database_session__.query(cls)
match organization:
case Organization():
# logger.debug(f"Looking up discount with organization Organization: {organization}")
query = query.filter(cls.client == organization)
case str():
# logger.debug(f"Looking up discount with organization str: {organization}")
query = query.join(Organization).filter(Organization.name == organization)
case int():
# logger.debug(f"Looking up discount with organization id: {organization}")
query = query.join(Organization).filter(Organization.id == organization)
case _:
pass
match kit_type:
case KitType():
# logger.debug(f"Looking up discount with kit type KitType: {kit_type}")
query = query.filter(cls.kit == kit_type)
case str():
# logger.debug(f"Looking up discount with kit type str: {kit_type}")
query = query.join(KitType).filter(KitType.name == kit_type)
case int():
# logger.debug(f"Looking up discount with kit type id: {kit_type}")
query = query.join(KitType).filter(KitType.id == kit_type)
case _:
pass
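Nearly every query classmethod in this file shares the shape these hunks leave behind: start from a base Query, refine it per keyword argument with structural pattern matching, and drop to `limit = 1` when the argument pins a unique row. A minimal sketch, assuming a session and a KitType model with `name` and `id` columns:

```python
def query_kits(session, name: str | None = None, id: int | str | None = None, limit: int = 0):
    query = session.query(KitType)
    match name:
        case str():
            query = query.filter(KitType.name == name)
            limit = 1  # name is unique, so return a single row
        case _:
            pass
    match id:
        case int():
            query = query.filter(KitType.id == id)
            limit = 1
        case str():
            query = query.filter(KitType.id == int(id))
            limit = 1
        case _:
            pass
    return query.first() if limit == 1 else query.all()
```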
@@ -723,7 +671,6 @@ class SubmissionType(BaseClass):
return submission_type.template_file
def get_template_file_sheets(self) -> List[str]:
logger.debug(f"Submission type to get sheets for: {self.name}")
"""
Gets names of sheets in the stored blank form.
@@ -768,7 +715,6 @@ class SubmissionType(BaseClass):
dict: Map of locations
"""
info = {k: v for k, v in self.info_map.items() if k != "custom"}
logger.debug(f"Info map: {info}")
match mode:
case "read":
output = {k: v[mode] for k, v in info.items() if v[mode]}
@@ -844,11 +790,9 @@ class SubmissionType(BaseClass):
"""
match equipment_role:
case str():
# logger.debug(f"Getting processes for equipmentrole str {equipment_role}")
relevant = [item.get_all_processes(kit) for item in self.submissiontype_equipmentrole_associations if
item.equipment_role.name == equipment_role]
case EquipmentRole():
# logger.debug(f"Getting processes for equipmentrole EquipmentRole {equipment_role}")
relevant = [item.get_all_processes(kit) for item in self.submissiontype_equipmentrole_associations if
item.equipment_role == equipment_role]
case _:
@@ -886,14 +830,12 @@ class SubmissionType(BaseClass):
query: Query = cls.__database_session__.query(cls)
match name:
case str():
# logger.debug(f"Looking up submission type by name str: {name}")
query = query.filter(cls.name == name)
limit = 1
case _:
pass
match key:
case str():
# logger.debug(f"Looking up submission type by info-map key str: {key}")
query = query.filter(cls.info_map.op('->')(key) is not None)
case _:
pass
@@ -946,7 +888,6 @@ class SubmissionType(BaseClass):
import_dict = yaml.load(stream=f, Loader=yaml.Loader)
else:
raise Exception(f"Filetype {filepath.suffix} not supported.")
# logger.debug(pformat(import_dict))
try:
submission_type = cls.query(name=import_dict['name'])
except KeyError:
@@ -1076,23 +1017,17 @@ class SubmissionTypeKitTypeAssociation(BaseClass):
query: Query = cls.__database_session__.query(cls)
match submission_type:
case SubmissionType():
# logger.debug(f"Looking up {cls.__name__} by SubmissionType {submission_type}")
query = query.filter(cls.submission_type == submission_type)
case str():
# logger.debug(f"Looking up {cls.__name__} by name {submission_type}")
query = query.join(SubmissionType).filter(SubmissionType.name == submission_type)
case int():
# logger.debug(f"Looking up {cls.__name__} by id {submission_type}")
query = query.join(SubmissionType).filter(SubmissionType.id == submission_type)
match kit_type:
case KitType():
# logger.debug(f"Looking up {cls.__name__} by KitType {kit_type}")
query = query.filter(cls.kit_type == kit_type)
case str():
# logger.debug(f"Looking up {cls.__name__} by name {kit_type}")
query = query.join(KitType).filter(KitType.name == kit_type)
case int():
# logger.debug(f"Looking up {cls.__name__} by id {kit_type}")
query = query.join(KitType).filter(KitType.id == kit_type)
limit = query.count()
return cls.execute_query(query=query, limit=limit)
@@ -1107,7 +1042,6 @@ class SubmissionTypeKitTypeAssociation(BaseClass):
exclude = ['_sa_instance_state', 'submission_types_id', 'kits_id', 'submission_type', 'kit_type']
base_dict = {k: v for k, v in self.__dict__.items() if k not in exclude}
base_dict['kit_type'] = self.kit_type.to_export_dict(submission_type=self.submission_type)
# logger.debug(f"STKTA returning: {base_dict}")
return base_dict
@@ -1128,10 +1062,11 @@ class KitTypeReagentRoleAssociation(BaseClass):
kit_type = relationship(KitType,
back_populates="kit_reagentrole_associations") #: relationship to associated KitType
# reference to the "ReagentType" object
# NOTE: reference to the "ReagentType" object
reagent_role = relationship(ReagentRole,
back_populates="reagentrole_kit_associations") #: relationship to associated ReagentType
# NOTE: reference to the "SubmissionType" object
submission_type = relationship(SubmissionType,
back_populates="submissiontype_kit_rt_associations") #: relationship to associated SubmissionType
@@ -1203,19 +1138,15 @@ class KitTypeReagentRoleAssociation(BaseClass):
query: Query = cls.__database_session__.query(cls)
match kit_type:
case KitType():
# logger.debug(f"Lookup KitTypeReagentTypeAssociation by kit_type KitType {kit_type}")
query = query.filter(cls.kit_type == kit_type)
case str():
# logger.debug(f"Lookup KitTypeReagentTypeAssociation by kit_type str {kit_type}")
query = query.join(KitType).filter(KitType.name == kit_type)
case _:
pass
match reagent_role:
case ReagentRole():
# logger.debug(f"Lookup KitTypeReagentTypeAssociation by reagent_type ReagentType {reagent_type}")
query = query.filter(cls.reagent_role == reagent_role)
case str():
# logger.debug(f"Lookup KitTypeReagentTypeAssociation by reagent_type ReagentType {reagent_type}")
query = query.join(ReagentRole).filter(ReagentRole.name == reagent_role)
case _:
pass
@@ -1242,7 +1173,6 @@ class KitTypeReagentRoleAssociation(BaseClass):
Returns:
Generator: Generates of reagents.
"""
# logger.debug(f"Attempting lookup of reagents by type: {reagent.type}")
reagents = self.reagent_role.instances
try:
regex = self.uses['exclude_regex']
@@ -1309,7 +1239,6 @@ class SubmissionReagentAssociation(BaseClass):
query: Query = cls.__database_session__.query(cls)
match reagent:
case Reagent() | str():
# logger.debug(f"Lookup SubmissionReagentAssociation by reagent Reagent {reagent}")
if isinstance(reagent, str):
reagent = Reagent.query(lot=reagent)
query = query.filter(cls.reagent == reagent)
@@ -1319,10 +1248,8 @@ class SubmissionReagentAssociation(BaseClass):
case BasicSubmission() | str():
if isinstance(submission, str):
submission = BasicSubmission.query(rsl_plate_num=submission)
# logger.debug(f"Lookup SubmissionReagentAssociation by submission BasicSubmission {submission}")
query = query.filter(cls.submission == submission)
case int():
# logger.debug(f"Lookup SubmissionReagentAssociation by submission id {submission}")
submission = BasicSubmission.query(id=submission)
query = query.join(BasicSubmission).filter(BasicSubmission.id == submission)
case _:
@@ -1439,21 +1366,18 @@ class Equipment(BaseClass, LogMixin):
query = cls.__database_session__.query(cls)
match name:
case str():
# logger.debug(f"Lookup Equipment by name str {name}")
query = query.filter(cls.name == name)
limit = 1
case _:
pass
match nickname:
case str():
# logger.debug(f"Lookup Equipment by nickname str {nickname}")
query = query.filter(cls.nickname == nickname)
limit = 1
case _:
pass
match asset_number:
case str():
# logger.debug(f"Lookup Equipment by asset_number str {asset_number}")
query = query.filter(cls.asset_number == asset_number)
limit = 1
case _:
@@ -1569,11 +1493,9 @@ class EquipmentRole(BaseClass):
PydEquipmentRole: This EquipmentRole as PydEquipmentRole
"""
from backend.validators.pydant import PydEquipmentRole
# logger.debug("Creating list of PydEquipment in this role")
equipment = [item.to_pydantic(submission_type=submission_type, extraction_kit=extraction_kit) for item in
self.instances]
pyd_dict = self.to_dict()
# logger.debug("Creating list of Processes in this role")
pyd_dict['processes'] = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit)
return PydEquipmentRole(equipment=equipment, **pyd_dict)
@@ -1595,14 +1517,12 @@ class EquipmentRole(BaseClass):
query = cls.__database_session__.query(cls)
match id:
case int():
# logger.debug(f"Lookup EquipmentRole by id {id}")
query = query.filter(cls.id == id)
limit = 1
case _:
pass
match name:
case str():
# logger.debug(f"Lookup EquipmentRole by name str {name}")
query = query.filter(cls.name == name)
limit = 1
case _:
@@ -1622,7 +1542,6 @@ class EquipmentRole(BaseClass):
List[Process]: List of processes
"""
if isinstance(submission_type, str):
# logger.debug(f"Checking if str {submission_type} exists")
submission_type = SubmissionType.query(name=submission_type)
if isinstance(extraction_kit, str):
extraction_kit = KitType.query(name=extraction_kit)
@@ -1808,7 +1727,6 @@ class Process(BaseClass):
query = cls.__database_session__.query(cls)
match name:
case str():
# logger.debug(f"Lookup Process with name str {name}")
query = query.filter(cls.name == name)
limit = 1
case _:
@@ -1892,13 +1810,11 @@ class Tips(BaseClass, LogMixin):
query = cls.__database_session__.query(cls)
match name:
case str():
# logger.debug(f"Lookup Equipment by name str {name}")
query = query.filter(cls.name == name)
case _:
pass
match lot:
case str():
# logger.debug(f"Lookup Equipment by nickname str {nickname}")
query = query.filter(cls.lot == lot)
limit = 1
case _:

View File

@@ -65,7 +65,6 @@ class Organization(BaseClass):
pass
match name:
case str():
# logger.debug(f"Looking up organization with name starting with: {name}")
query = query.filter(cls.name.startswith(name))
limit = 1
case _:
@@ -159,21 +158,18 @@ class Contact(BaseClass):
query: Query = cls.__database_session__.query(cls)
match name:
case str():
# logger.debug(f"Looking up contact with name: {name}")
query = query.filter(cls.name == name.title())
limit = 1
case _:
pass
match email:
case str():
# logger.debug(f"Looking up contact with email: {name}")
query = query.filter(cls.email == email)
limit = 1
case _:
pass
match phone:
case str():
# logger.debug(f"Looking up contact with phone: {name}")
query = query.filter(cls.phone == phone)
limit = 1
case _:

File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
'''
"""
contains parser objects for pulling values from client generated submission sheets.
'''
"""
import logging
from copy import copy
from getpass import getuser
@@ -53,7 +53,6 @@ class SheetParser(object):
self.parse_samples()
self.parse_equipment()
self.parse_tips()
# logger.debug(f"Parser.sub after info scrape: {pformat(self.sub)}")
def parse_info(self):
"""
@@ -71,7 +70,6 @@ class SheetParser(object):
logger.info(
f"Checking for updated submission type: {self.submission_type.name} against new: {info['submission_type']['value']}")
if self.submission_type.name != info['submission_type']['value']:
# logger.debug(f"info submission type: {info}")
if check:
self.submission_type = SubmissionType.query(name=info['submission_type']['value'])
logger.info(f"Updated self.submission_type to {self.submission_type}. Rerunning parse.")
@@ -90,11 +88,9 @@ class SheetParser(object):
"""
if extraction_kit is None:
extraction_kit = self.sub['extraction_kit']
# logger.debug(f"Parsing reagents for {extraction_kit}")
parser = ReagentParser(xl=self.xl, submission_type=self.submission_type,
extraction_kit=extraction_kit)
self.sub['reagents'] = parser.parse_reagents()
# logger.debug(f"Reagents out of parser: {pformat(self.sub['reagents'])}")
def parse_samples(self):
"""
@@ -155,7 +151,6 @@ class InfoParser(object):
submission_type (str | SubmissionType): Type of submission expected (Wastewater, Bacterial Culture, etc.)
sub_object (BasicSubmission | None, optional): Submission object holding methods. Defaults to None.
"""
logger.info(f"\n\nHello from InfoParser!\n\n")
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
if sub_object is None:
@@ -164,7 +159,6 @@ class InfoParser(object):
self.sub_object = sub_object
self.map = self.fetch_submission_info_map()
self.xl = xl
# logger.debug(f"Info map for InfoParser: {pformat(self.map)}")
def fetch_submission_info_map(self) -> dict:
"""
@@ -174,7 +168,6 @@ class InfoParser(object):
dict: Location map of all info for this submission type
"""
self.submission_type = dict(value=self.submission_type_obj.name, missing=True)
# logger.debug(f"Looking up submission type: {self.submission_type['value']}")
info_map = self.sub_object.construct_info_map(submission_type=self.submission_type_obj, mode="read")
# NOTE: Get the parse_info method from the submission type specified
return info_map
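The info map returned here drives the whole parse: each field name maps to one or more cell locations, and the parser reads them back with openpyxl. A minimal sketch of that read step, with an illustrative map and workbook path:

```python
from openpyxl import load_workbook

# Hypothetical map shape: {field: [{"sheet": ..., "row": ..., "column": ...}]}
info_map = {"submitter": [{"sheet": "Sample Info", "row": 3, "column": 2}]}
wb = load_workbook("submission.xlsx")  # illustrative path

parsed = {}
for field, locations in info_map.items():
    for loc in locations:
        value = wb[loc["sheet"]].cell(row=loc["row"], column=loc["column"]).value
        parsed[field] = dict(value=value, missing=value is None)
```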
@@ -188,7 +181,6 @@ class InfoParser(object):
"""
dicto = {}
# NOTE: This loop parses generic info
# logger.debug(f"Map: {self.map}")
for sheet in self.xl.sheetnames:
ws = self.xl[sheet]
relevant = []
@@ -197,11 +189,8 @@ class InfoParser(object):
if k == "custom":
continue
if isinstance(v, str):
logger.debug(f"Found string for {k}, setting value to {v}")
dicto[k] = dict(value=v, missing=False)
continue
# logger.debug(f"Looking for {k} in self.map")
# logger.debug(f"Locations: {v}")
for location in v:
try:
check = location['sheet'] == sheet
@@ -213,21 +202,18 @@ class InfoParser(object):
new = location
new['name'] = k
relevant.append(new)
# logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
# NOTE: make sure relevant is not an empty list.
if not relevant:
continue
for item in relevant:
# NOTE: Get cell contents at this location
value = ws.cell(row=item['row'], column=item['column']).value
# logger.debug(f"Value for {item['name']} = {value}")
match item['name']:
case "submission_type":
value, missing = is_missing(value)
value = value.title()
case "submitted_date":
value, missing = is_missing(value)
logger.debug(f"Parsed submitted date: {value}")
# NOTE: is field a JSON? Includes: Extraction info, PCR info, comment, custom
case thing if thing in self.sub_object.jsons():
value, missing = is_missing(value)
@@ -240,7 +226,6 @@ class InfoParser(object):
logger.error(f"New value for {item['name']}")
case _:
value, missing = is_missing(value)
# logger.debug(f"Setting {item} on {sheet} to {value}")
if item['name'] not in dicto.keys():
try:
dicto[item['name']] = dict(value=value, missing=missing)
@@ -264,7 +249,6 @@ class ReagentParser(object):
extraction_kit (str): Extraction kit used.
sub_object (BasicSubmission | None, optional): Submission object holding methods. Defaults to None.
"""
logger.info("\n\nHello from ReagentParser!\n\n")
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
self.submission_type_obj = submission_type
@@ -272,9 +256,7 @@ class ReagentParser(object):
if isinstance(extraction_kit, dict):
extraction_kit = extraction_kit['value']
self.kit_object = KitType.query(name=extraction_kit)
logger.debug(f"Got extraction kit object: {self.kit_object}")
self.map = self.fetch_kit_info_map(submission_type=submission_type)
logger.debug(f"Reagent Parser map: {self.map}")
self.xl = xl
@report_result
@@ -298,14 +280,11 @@ class ReagentParser(object):
del reagent_map['info']
except KeyError:
pass
# logger.debug(f"Reagent map: {pformat(reagent_map)}")
# NOTE: If reagent map is empty, maybe the wrong kit was given, check if there's only one kit for that submission type and use it if so.
if not reagent_map:
temp_kit_object = self.submission_type_obj.get_default_kit()
# logger.debug(f"Temp kit: {temp_kit_object}")
if temp_kit_object:
self.kit_object = temp_kit_object
# reagent_map = {k: v for k, v in self.kit_object.construct_xl_map_for_use(submission_type)}
logger.warning(f"Attempting to salvage with default kit {self.kit_object} and submission_type: {self.submission_type_obj}")
return self.fetch_kit_info_map(submission_type=self.submission_type_obj)
else:
@@ -331,18 +310,15 @@ class ReagentParser(object):
for sheet in self.xl.sheetnames:
ws = self.xl[sheet]
relevant = {k.strip(): v for k, v in self.map.items() if sheet in self.map[k]['sheet']}
# logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
if relevant == {}:
continue
for item in relevant:
# logger.debug(f"Attempting to scrape: {item}")
try:
reagent = relevant[item]
name = ws.cell(row=reagent['name']['row'], column=reagent['name']['column']).value
lot = ws.cell(row=reagent['lot']['row'], column=reagent['lot']['column']).value
expiry = ws.cell(row=reagent['expiry']['row'], column=reagent['expiry']['column']).value
if 'comment' in relevant[item].keys():
# logger.debug(f"looking for {relevant[item]} comment.")
comment = ws.cell(row=reagent['comment']['row'], column=reagent['comment']['column']).value
else:
comment = ""
@@ -353,10 +329,7 @@ class ReagentParser(object):
missing = False
else:
missing = True
# logger.debug(f"Got lot for {item}-{name}: {lot} as {type(lot)}")
lot = str(lot)
# logger.debug(
# f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}, comment: {comment}")
try:
check = name.lower() != "not applicable"
except AttributeError:
@@ -381,12 +354,10 @@ class SampleParser(object):
sample_map (dict | None, optional): Locations in database where samples are found. Defaults to None.
sub_object (BasicSubmission | None, optional): Submission object holding methods. Defaults to None.
"""
logger.info("\n\nHello from SampleParser!\n\n")
self.samples = []
self.xl = xl
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
# logger.debug(f"Sample parser is using submission type: {submission_type}")
self.submission_type = submission_type.name
self.submission_type_obj = submission_type
if sub_object is None:
@@ -395,7 +366,6 @@ class SampleParser(object):
sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
self.sub_object = sub_object
self.sample_info_map = self.fetch_sample_info_map(submission_type=submission_type, sample_map=sample_map)
# logger.debug(f"sample_info_map: {self.sample_info_map}")
self.plate_map_samples = self.parse_plate_map()
self.lookup_samples = self.parse_lookup_table()
@@ -409,11 +379,8 @@ class SampleParser(object):
Returns:
dict: Info locations.
"""
# logger.debug(f"Looking up submission type: {submission_type}")
self.sample_type = self.sub_object.get_default_info("sample_type", submission_type=submission_type)
self.samp_object = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)
# logger.debug(f"Got sample class: {self.samp_object.__name__}")
# logger.debug(f"info_map: {pformat(se)}")
if sample_map is None:
sample_info_map = self.sub_object.construct_sample_map(submission_type=self.submission_type_obj)
else:
@@ -432,9 +399,7 @@ class SampleParser(object):
ws = self.xl[smap['sheet']]
plate_map_samples = []
for ii, row in enumerate(range(smap['start_row'], smap['end_row'] + 1), start=1):
# logger.debug(f"Parsing row: {row}")
for jj, column in enumerate(range(smap['start_column'], smap['end_column'] + 1), start=1):
# logger.debug(f"Parsing column: {column}")
id = str(ws.cell(row=row, column=column).value)
if check_not_nan(id):
if id not in invalids:
@@ -442,10 +407,8 @@ class SampleParser(object):
sample_dict['sample_type'] = self.sample_type
plate_map_samples.append(sample_dict)
else:
# logger.error(f"Sample cell ({row}, {column}) has invalid value: {id}.")
pass
else:
# logger.error(f"Sample cell ({row}, {column}) has no info: {id}.")
pass
return plate_map_samples
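`parse_plate_map` walks a rectangular cell range and keeps only non-empty, valid IDs, recording each sample's 1-based plate position. A minimal sketch of that double loop, with illustrative bounds and invalid markers:

```python
from openpyxl import load_workbook

smap = dict(sheet="Plate Map", start_row=2, end_row=9, start_column=2, end_column=13)
ws = load_workbook("submission.xlsx")[smap["sheet"]]  # illustrative path

samples = []
for ii, row in enumerate(range(smap["start_row"], smap["end_row"] + 1), start=1):
    for jj, column in enumerate(range(smap["start_column"], smap["end_column"] + 1), start=1):
        value = ws.cell(row=row, column=column).value
        if value is None or str(value).lower() in ("nan", "none", ""):  # stand-in invalids
            continue
        samples.append(dict(id=str(value), row=ii, column=jj))
```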
@@ -507,7 +470,6 @@ class SampleParser(object):
except (KeyError, IndexError):
check = False
if check:
# logger.debug(f"Direct match found for {psample['id']}")
new = lookup_samples[ii] | psample
lookup_samples[ii] = {}
else:
@@ -516,7 +478,6 @@ class SampleParser(object):
if merge_on_id in sample.keys()]
jj, new = next(((jj, lsample | psample) for jj, lsample in searchables
if lsample[merge_on_id] == psample['id']), (-1, psample))
# logger.debug(f"Assigning from index {jj} - {new}")
if jj >= 0:
lookup_samples[jj] = {}
if not check_key_or_attr(key='submitter_id', interest=new, check_none=True):
@@ -540,7 +501,6 @@ class EquipmentParser(object):
xl (Workbook): Openpyxl workbook from submitted excel file.
submission_type (str | SubmissionType): Type of submission expected (Wastewater, Bacterial Culture, etc.)
"""
logger.info("\n\nHello from EquipmentParser!\n\n")
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
self.submission_type = submission_type
@@ -567,7 +527,6 @@ class EquipmentParser(object):
str: asset number
"""
regex = Equipment.get_regex()
# logger.debug(f"Using equipment regex: {regex} on {input}")
try:
return regex.search(input).group().strip("-")
except AttributeError as e:
@@ -581,8 +540,6 @@ class EquipmentParser(object):
Returns:
List[dict]: list of equipment
"""
# logger.debug(f"Equipment parser going into parsing: {pformat(self.__dict__)}")
# logger.debug(f"Sheets: {sheets}")
for sheet in self.xl.sheetnames:
ws = self.xl[sheet]
try:
@@ -590,17 +547,14 @@ class EquipmentParser(object):
except (TypeError, KeyError) as e:
logger.error(f"Error creating relevant equipment list: {e}")
continue
# logger.debug(f"Relevant equipment: {pformat(relevant)}")
previous_asset = ""
for k, v in relevant.items():
# logger.debug(f"Checking: {v}")
asset = ws.cell(v['name']['row'], v['name']['column']).value
if not check_not_nan(asset):
asset = previous_asset
else:
previous_asset = asset
asset = self.get_asset_number(input=asset)
# logger.debug(f"asset: {asset}")
eq = Equipment.query(asset_number=asset)
if eq is None:
eq = Equipment.query(name=asset)
@@ -623,7 +577,6 @@ class TipParser(object):
xl (Workbook): Openpyxl workbook from submitted excel file.
submission_type (str | SubmissionType): Type of submission expected (Wastewater, Bacterial Culture, etc.)
"""
logger.info("\n\nHello from TipParser!\n\n")
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
self.submission_type = submission_type
@@ -646,8 +599,6 @@ class TipParser(object):
Returns:
List[dict]: list of equipment
"""
# logger.debug(f"Equipment parser going into parsing: {pformat(self.__dict__)}")
# logger.debug(f"Sheets: {sheets}")
for sheet in self.xl.sheetnames:
ws = self.xl[sheet]
try:
@@ -655,7 +606,6 @@ class TipParser(object):
except (TypeError, KeyError) as e:
logger.error(f"Error creating relevant equipment list: {e}")
continue
# logger.debug(f"Relevant equipment: {pformat(relevant)}")
previous_asset = ""
for k, v in relevant.items():
asset = ws.cell(v['name']['row'], v['name']['column']).value
@@ -667,7 +617,6 @@ class TipParser(object):
asset = previous_asset
else:
previous_asset = asset
# logger.debug(f"asset: {asset}")
eq = Tips.query(lot=lot, name=asset, limit=1)
try:
yield dict(name=eq.name, role=k, lot=lot)
@@ -684,7 +633,6 @@ class PCRParser(object):
filepath (Path | None, optional): file to parse. Defaults to None.
submission (BasicSubmission | None, optional): Submission parsed data to be added to.
"""
# logger.debug(f'Parsing {filepath.__str__()}')
if filepath is None:
logger.error('No filepath given.')
self.xl = None
@@ -727,5 +675,4 @@ class PCRParser(object):
value = row[1].value or ""
pcr[key] = value
pcr['imported_by'] = getuser()
# logger.debug(f"PCR: {pformat(pcr)}")
return pcr

View File

@@ -32,7 +32,6 @@ class ReportArchetype(object):
filename = filename.absolute()
self.writer = ExcelWriter(filename.with_suffix(".xlsx"), engine='openpyxl')
self.df.to_excel(self.writer, sheet_name=self.sheet_name)
# logger.debug(f"Writing report to: {filename}")
self.writer.close()
@@ -43,7 +42,6 @@ class ReportMaker(object):
self.end_date = end_date
# NOTE: Set page size to zero to override limiting query size.
self.subs = BasicSubmission.query(start_date=start_date, end_date=end_date, page_size=0)
# logger.debug(f"Number of subs returned: {len(self.subs)}")
if organizations is not None:
self.subs = [sub for sub in self.subs if sub.submitting_lab.name in organizations]
self.detailed_df, self.summary_df = self.make_report_xlsx()
@@ -65,10 +63,8 @@ class ReportMaker(object):
df2 = df.groupby(["submitting_lab", "extraction_kit"]).agg(
{'extraction_kit': 'count', 'cost': 'sum', 'sample_count': 'sum'})
df2 = df2.rename(columns={"extraction_kit": 'run_count'})
# logger.debug(f"Output daftaframe for xlsx: {df2.columns}")
df = df.drop('id', axis=1)
df = df.sort_values(['submitting_lab', "submitted_date"])
# logger.debug(f"Details dataframe:\n{df2}")
return df, df2
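The summary step above is a plain pandas groupby/aggregate; a minimal sketch with fabricated rows, written with named aggregation (equivalent to the dict-style `.agg` call in the hunk):

```python
from pandas import DataFrame

df = DataFrame(dict(submitting_lab=["A", "A", "B"],
                    extraction_kit=["K1", "K1", "K2"],
                    cost=[10.0, 12.0, 9.0],
                    sample_count=[8, 8, 4]))

df2 = (df.groupby(["submitting_lab", "extraction_kit"])
         .agg(run_count=("cost", "count"),          # one row per run, so count = run_count
              cost=("cost", "sum"),
              sample_count=("sample_count", "sum")))
print(df2)  # one row per (lab, kit) with run_count, total cost, total samples
```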
def make_report_html(self, df: DataFrame) -> str:
@@ -86,12 +82,8 @@ class ReportMaker(object):
"""
old_lab = ""
output = []
# logger.debug(f"Report DataFrame: {df}")
for row in df.iterrows():
# logger.debug(f"Row {ii}: {row}")
lab = row[0][0]
# logger.debug(f"Old lab: {old_lab}, Current lab: {lab}")
# logger.debug(f"Name: {row[0][1]}")
data = [item for item in row[1]]
kit = dict(name=row[0][1], cost=data[1], run_count=int(data[0]), sample_count=int(data[2]))
# NOTE: if this is the same lab as before add together
@@ -106,7 +98,6 @@ class ReportMaker(object):
total_runs=kit['run_count'])
output.append(adder)
old_lab = lab
# logger.debug(output)
dicto = {'start_date': self.start_date, 'end_date': self.end_date, 'labs': output}
temp = env.get_template('summary_report.html')
html = temp.render(input=dicto)
@@ -127,14 +118,12 @@ class ReportMaker(object):
self.summary_df.to_excel(self.writer, sheet_name="Report")
self.detailed_df.to_excel(self.writer, sheet_name="Details", index=False)
self.fix_up_xl()
# logger.debug(f"Writing report to: {filename}")
self.writer.close()
def fix_up_xl(self):
"""
Handles formatting of xl file, mediocrely.
"""
# logger.debug(f"Updating worksheet")
worksheet: Worksheet = self.writer.sheets['Report']
for idx, col in enumerate(self.summary_df, start=1): # NOTE: loop through all columns
series = self.summary_df[col]
@@ -149,7 +138,6 @@ class ReportMaker(object):
except ValueError as e:
logger.error(f"Couldn't resize column {col} due to {e}")
blank_row = get_first_blank_df_row(self.summary_df) + 1
# logger.debug(f"Blank row index = {blank_row}")
for col in range(3, 6):
col_letter = row_map[col]
worksheet.cell(row=blank_row, column=col, value=f"=SUM({col_letter}2:{col_letter}{str(blank_row - 1)})")

View File

@@ -3,7 +3,6 @@ contains writer objects for pushing values to submission sheet templates.
"""
import logging
from copy import copy
from datetime import date
from operator import itemgetter
from pprint import pformat
from typing import List, Generator, Tuple
@@ -111,7 +110,6 @@ class InfoWriter(object):
info_dict (dict): Dictionary of information to write.
sub_object (BasicSubmission | None, optional): Submission object containing methods. Defaults to None.
"""
logger.debug(f"Info_dict coming into InfoWriter: {pformat(info_dict)}")
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
if sub_object is None:
@@ -121,7 +119,6 @@ class InfoWriter(object):
self.xl = xl
self.info_map = submission_type.construct_info_map(mode='write')
self.info = self.reconcile_map(info_dict, self.info_map)
# logger.debug(pformat(self.info))
def reconcile_map(self, info_dict: dict, info_map: dict) -> Generator[(Tuple[str, dict]), None, None]:
"""
@@ -170,7 +167,6 @@ class InfoWriter(object):
logger.error(f"No locations for {k}, skipping")
continue
for loc in locations:
logger.debug(f"Writing {k} to {loc['sheet']}, row: {loc['row']}, column: {loc['column']}")
sheet = self.xl[loc['sheet']]
try:
sheet.cell(row=loc['row'], column=loc['column'], value=v['value'])
@@ -247,8 +243,6 @@ class ReagentWriter(object):
for v in reagent.values():
if not isinstance(v, dict):
continue
# logger.debug(
# f"Writing {reagent['type']} {k} to {reagent['sheet']}, row: {v['row']}, column: {v['column']}")
sheet.cell(row=v['row'], column=v['column'], value=v['value'])
return self.xl
@@ -288,7 +282,6 @@ class SampleWriter(object):
multiples = ['row', 'column', 'assoc_id', 'submission_rank']
for sample in sample_list:
sample = self.submission_type.get_submission_class().custom_sample_writer(sample)
logger.debug(f"Writing sample: {sample}")
for assoc in zip(sample['row'], sample['column'], sample['submission_rank']):
new = dict(row=assoc[0], column=assoc[1], submission_rank=assoc[2])
for k, v in sample.items():
@@ -369,9 +362,8 @@ class EquipmentWriter(object):
mp_info = equipment_map[equipment['role']]
except KeyError:
logger.error(f"No {equipment['role']} in {pformat(equipment_map)}")
# logger.debug(f"{equipment['role']} map: {mp_info}")
mp_info = None
placeholder = copy(equipment)
# if mp_info == {}:
if not mp_info:
for jj, (k, v) in enumerate(equipment.items(), start=1):
dicto = dict(value=v, row=ii, column=jj)
@@ -381,7 +373,6 @@ class EquipmentWriter(object):
try:
dicto = dict(value=v, row=mp_info[k]['row'], column=mp_info[k]['column'])
except KeyError as e:
# logger.error(f"Keyerror: {e}")
continue
placeholder[k] = dicto
if "asset_number" not in mp_info.keys():
@@ -400,17 +391,12 @@ class EquipmentWriter(object):
Workbook: Workbook with equipment written
"""
for equipment in self.equipment:
try:
sheet = self.xl[equipment['sheet']]
except KeyError:
if not equipment['sheet'] in self.xl.sheetnames:
self.xl.create_sheet("Equipment")
finally:
sheet = self.xl[equipment['sheet']]
sheet = self.xl[equipment['sheet']]
for k, v in equipment.items():
if not isinstance(v, dict):
continue
# logger.debug(
# f"Writing {k}: {v['value']} to {equipment['sheet']}, row: {v['row']}, column: {v['column']}")
if isinstance(v['value'], list):
v['value'] = v['value'][0]
try:
@@ -455,7 +441,6 @@ class TipWriter(object):
return
for ii, tips in enumerate(tips_list, start=1):
mp_info = tips_map[tips.role]
# logger.debug(f"{tips['role']} map: {mp_info}")
placeholder = {}
if mp_info == {}:
for jj, (k, v) in enumerate(tips.__dict__.items(), start=1):
@@ -466,14 +451,12 @@ class TipWriter(object):
try:
dicto = dict(value=v, row=mp_info[k]['row'], column=mp_info[k]['column'])
except KeyError as e:
# logger.error(f"Keyerror: {e}")
continue
placeholder[k] = dicto
try:
placeholder['sheet'] = mp_info['sheet']
except KeyError:
placeholder['sheet'] = "Tips"
# logger.debug(f"Final output of {tips['role']} : {placeholder}")
yield placeholder
def write_tips(self) -> Workbook:
@@ -484,17 +467,12 @@ class TipWriter(object):
Workbook: Workbook with tips written
"""
for tips in self.tips:
try:
sheet = self.xl[tips['sheet']]
except KeyError:
if not tips['sheet'] in self.xl.sheetnames:
self.xl.create_sheet("Tips")
finally:
sheet = self.xl[tips['sheet']]
sheet = self.xl[tips['sheet']]
for k, v in tips.items():
if not isinstance(v, dict):
continue
# logger.debug(
# f"Writing {k}: {v['value']} to {equipment['sheet']}, row: {v['row']}, column: {v['column']}")
if isinstance(v['value'], list):
v['value'] = v['value'][0]
try:

View File

@@ -1,7 +1,7 @@
from .irida import import_irida
def hello(ctx):
print("\n\nHello!\n\n")
print("\n\nHello! Welcome to Robotics Submission Tracker.\n\n")
def goodbye(ctx):
print("\n\nGoodbye\n\n")
print("\n\nGoodbye. Thank you for using Robotics Submission Tracker.\n\n")

View File

@@ -19,11 +19,10 @@ def import_irida(ctx:Settings):
existing_controls = [item.name for item in IridaControl.query()]
prm_list = ", ".join([f"'{thing}'" for thing in existing_controls])
ctrl_db_path = ctx.directory_path.joinpath("submissions_parser_output", "submissions.db")
# print(f"Incoming settings: {pformat(ctx)}")
try:
conn = sqlite3.connect(ctrl_db_path)
except AttributeError as e:
print(f"Error, could not import from irida due to {e}")
logger.error(f"Error, could not import from irida due to {e}")
return
sql = f"SELECT name, submitted_date, submission_id, contains, matches, kraken, subtype, refseq_version, " \
f"kraken2_version, kraken2_db_version, sample_id FROM _iridacontrol INNER JOIN _control on _control.id " \
@@ -32,8 +31,6 @@ def import_irida(ctx:Settings):
records = [dict(name=row[0], submitted_date=row[1], submission_id=row[2], contains=row[3], matches=row[4], kraken=row[5],
subtype=row[6], refseq_version=row[7], kraken2_version=row[8], kraken2_db_version=row[9],
sample_id=row[10]) for row in cursor]
# incoming_controls = set(item['name'] for item in records)
# relevant = list(incoming_controls - existing_controls)
for record in records:
instance = IridaControl.query(name=record['name'])
if instance:
@@ -52,5 +49,4 @@ def import_irida(ctx:Settings):
if sample:
instance.sample = sample
instance.submission = sample.submissions[0]
# pprint(instance.__dict__)
instance.save()
instance.save()
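The importer above reads control rows straight from a companion SQLite file. A minimal sketch of that read, using a `row_factory` so each row arrives as a dict rather than by positional index (illustrative path, trimmed column list):

```python
import sqlite3

conn = sqlite3.connect("submissions.db")  # illustrative path
conn.row_factory = sqlite3.Row
sql = ("SELECT name, submitted_date, sample_id FROM _iridacontrol "
       "INNER JOIN _control ON _control.id = _iridacontrol.id")
records = [dict(row) for row in conn.execute(sql)]
```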

View File

@@ -24,11 +24,9 @@ class RSLNamer(object):
filename = Path(filename) if Path(filename).exists() else filename
self.submission_type = sub_type
if not self.submission_type:
# logger.debug("Creating submission type because none exists")
self.submission_type = self.retrieve_submission_type(filename=filename)
logger.info(f"got submission type: {self.submission_type}")
if self.submission_type:
# logger.debug("Retrieving BasicSubmission subclass")
self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
self.parsed_name = self.retrieve_rsl_number(filename=filename, regex=self.sub_object.get_regex(submission_type=sub_type))
if not data:
@@ -52,7 +50,6 @@ class RSLNamer(object):
str: parsed submission type
"""
def st_from_path(filename:Path) -> str:
# logger.info(f"Using path method for {filename}.")
if filename.exists():
wb = load_workbook(filename)
try:
@@ -73,12 +70,9 @@ class RSLNamer(object):
if filename.startswith("tmp"):
return "Bacterial Culture"
regex = BasicSubmission.construct_regex()
# logger.info(f"Using string method for {filename}.")
# logger.debug(f"Using regex: {regex}")
m = regex.search(filename)
try:
submission_type = m.lastgroup
# logger.debug(f"Got submission type: {submission_type}")
except AttributeError as e:
submission_type = None
logger.critical(f"No submission type found or submission type found!: {e}")
@@ -98,7 +92,6 @@ class RSLNamer(object):
if check:
if "pytest" in sys.modules:
raise ValueError("Submission Type came back as None.")
# logger.debug("Final option, ask the user for submission type")
from frontend.widgets import ObjectSelector
dlg = ObjectSelector(title="Couldn't parse submission type.",
message="Please select submission type from list below.", obj_type=SubmissionType)
@@ -116,21 +109,17 @@ class RSLNamer(object):
regex (str): string to construct pattern
filename (str): string to be parsed
"""
logger.info(f"Input string to be parsed: {filename}")
if regex is None:
regex = BasicSubmission.construct_regex()
else:
# logger.debug(f"Incoming regex: {regex}")
try:
regex = re.compile(rf'{regex}', re.IGNORECASE | re.VERBOSE)
except re.error as e:
regex = BasicSubmission.construct_regex()
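# NOTE: a caller-supplied pattern that fails to compile falls back to the master regex built from all BasicSubmission subclasses.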
logger.info(f"Using regex: {regex}")
match filename:
case Path():
m = regex.search(filename.stem)
case str():
# logger.debug(f"Using string method.")
m = regex.search(filename)
case _:
m = None
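# NOTE: Path inputs are matched against the stem (no suffix), plain strings as-is; any other type yields no match.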
@@ -141,7 +130,6 @@ class RSLNamer(object):
parsed_name = None
else:
parsed_name = None
# logger.debug(f"Got parsed submission name: {parsed_name}")
return parsed_name
@classmethod
@@ -187,8 +175,6 @@ class RSLNamer(object):
Returns:
str: output file name.
"""
# logger.debug(f"Kwargs: {kwargs}")
# logger.debug(f"Template: {template}")
environment = jinja_template_loading()
template = environment.from_string(template)
return template.render(**kwargs)
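# NOTE: a minimal usage sketch (template string and field values assumed, not taken from this repo):
#   namer.construct_export_name(template="{{ rsl_plate_num }}_{{ submitted_date }}",
#                               rsl_plate_num="RSL-BS-Test001", submitted_date="2024-12-12")
#   would render to "RSL-BS-Test001_2024-12-12".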

View File

@@ -1,6 +1,6 @@
'''
"""
Contains pydantic models and accompanying validators
'''
"""
from __future__ import annotations
import uuid, re, logging, csv, sys
from pydantic import BaseModel, field_validator, Field, model_validator
@@ -123,18 +123,14 @@ class PydReagent(BaseModel):
Tuple[Reagent, Report]: Reagent instance and result of function
"""
report = Report()
# logger.debug("Adding extra fields.")
if self.model_extra is not None:
self.__dict__.update(self.model_extra)
# logger.debug(f"Reagent SQL constructor is looking up type: {self.type}, lot: {self.lot}")
reagent = Reagent.query(lot=self.lot, name=self.name)
# logger.debug(f"Result: {reagent}")
if reagent is None:
reagent = Reagent()
for key, value in self.__dict__.items():
if isinstance(value, dict):
value = value['value']
# logger.debug(f"Reagent info item for {key}: {value}")
# NOTE: set fields based on keys in dictionary
match key:
case "lot":
@@ -149,7 +145,6 @@ class PydReagent(BaseModel):
if isinstance(value, str):
value = date(year=1970, month=1, day=1)
value = datetime.combine(value, datetime.min.time())
logger.debug(f"Expiry date coming into sql: {value} with type {type(value)}")
reagent.expiry = value.replace(tzinfo=timezone)
case _:
try:
@@ -179,14 +174,12 @@ class PydSample(BaseModel, extra='allow'):
@model_validator(mode='after')
@classmethod
def validate_model(cls, data):
# logger.debug(f"Data for pydsample: {data}")
model = BasicSample.find_polymorphic_subclass(polymorphic_identity=data.sample_type)
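# NOTE: extra fields named in the model's timestamps() arrive from the parser as strings and are coerced to datetime below.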
for k, v in data.model_extra.items():
if k in model.timestamps():
if isinstance(v, str):
v = datetime.strptime(v, "%Y-%m-%d")
data.__setattr__(k, v)
# logger.debug(f"Data coming out of validation: {pformat(data)}")
return data
@field_validator("row", "column", "assoc_id", "submission_rank")
@@ -238,7 +231,6 @@ class PydSample(BaseModel, extra='allow'):
"""
report = None
self.__dict__.update(self.model_extra)
# logger.debug(f"Here is the incoming sample dict: \n{self.__dict__}")
instance = BasicSample.query_or_create(sample_type=self.sample_type, submitter_id=self.submitter_id)
for key, value in self.__dict__.items():
match key:
@@ -246,7 +238,6 @@ class PydSample(BaseModel, extra='allow'):
case "row" | "column":
continue
case _:
# logger.debug(f"Setting sample field {key} to {value}")
instance.__setattr__(key, value)
out_associations = []
if submission is not None:
@@ -254,15 +245,12 @@ class PydSample(BaseModel, extra='allow'):
submission = BasicSubmission.query(rsl_plate_num=submission)
assoc_type = submission.submission_type_name
for row, column, aid, submission_rank in zip(self.row, self.column, self.assoc_id, self.submission_rank):
# logger.debug(f"Looking up association with identity: ({submission.submission_type_name} Association)")
# logger.debug(f"Looking up association with identity: ({assoc_type} Association)")
association = SubmissionSampleAssociation.query_or_create(association_type=f"{assoc_type} Association",
submission=submission,
sample=instance,
row=row, column=column, id=aid,
submission_rank=submission_rank,
**self.model_extra)
# logger.debug(f"Using submission_sample_association: {association}")
try:
out_associations.append(association)
except IntegrityError as e:
@@ -332,7 +320,6 @@ class PydEquipment(BaseModel, extra='ignore'):
@field_validator('processes', mode='before')
@classmethod
def make_empty_list(cls, value):
# logger.debug(f"Pydantic value: {value}")
if isinstance(value, GeneratorType):
value = [item.name for item in value]
value = convert_nans_to_nones(value)
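# NOTE: processes may arrive as a generator of ORM objects; materialize to their names, then map any NaNs to None.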
@@ -355,7 +342,6 @@ class PydEquipment(BaseModel, extra='ignore'):
Tuple[Equipment, SubmissionEquipmentAssociation]: SQL objects
"""
if isinstance(submission, str):
# logger.debug(f"Got string, querying {submission}")
submission = BasicSubmission.query(rsl_plate_num=submission)
equipment = Equipment.query(asset_number=self.asset_number)
if equipment is None:
@@ -403,7 +389,6 @@ class PydEquipment(BaseModel, extra='ignore'):
class PydSubmission(BaseModel, extra='allow'):
filepath: Path
submission_type: dict | None
# For defaults
submitter_plate_num: dict | None = Field(default=dict(value=None, missing=True), validate_default=True)
submitted_date: dict | None
rsl_plate_num: dict | None = Field(default=dict(value=None, missing=True), validate_default=True)
@@ -427,7 +412,6 @@ class PydSubmission(BaseModel, extra='allow'):
if isinstance(value, dict):
value = value['value']
if isinstance(value, Generator):
# logger.debug("We have a generator")
return [PydTips(**tips) for tips in value]
if not value:
return []
@@ -436,9 +420,7 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator('equipment', mode='before')
@classmethod
def convert_equipment_dict(cls, value):
# logger.debug(f"Equipment: {value}")
if isinstance(value, Generator):
logger.debug("We have a generator")
return [PydEquipment(**equipment) for equipment in value]
if isinstance(value, dict):
return value['value']
@@ -454,7 +436,6 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("submitter_plate_num")
@classmethod
def enforce_with_uuid(cls, value):
# logger.debug(f"submitter_plate_num coming into pydantic: {value}")
if value['value'] in [None, "None"]:
return dict(value=uuid.uuid4().hex.upper(), missing=True)
else:
@@ -464,7 +445,6 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("submitted_date", mode="before")
@classmethod
def rescue_date(cls, value):
# logger.debug(f"\n\nDate coming into pydantic: {value}\n\n")
try:
check = value['value'] is None
except TypeError:
@@ -509,7 +489,6 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def lookup_submitting_lab(cls, value):
if isinstance(value['value'], str):
# logger.debug(f"Looking up organization {value['value']}")
try:
value['value'] = Organization.query(name=value['value']).name
except AttributeError:
@@ -540,13 +519,11 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("rsl_plate_num")
@classmethod
def rsl_from_file(cls, value, values):
# logger.debug(f"RSL-plate initial value: {value['value']} and other values: {values.data}")
sub_type = values.data['submission_type']['value']
if check_not_nan(value['value']):
value['value'] = value['value'].strip()
return value
else:
# logger.debug("Constructing plate sub_type.")
if "pytest" in sys.modules and sub_type.replace(" ", "") == "BasicSubmission":
output = "RSL-BS-Test001"
else:
@@ -623,7 +600,6 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def expand_reagents(cls, value):
if isinstance(value, Generator):
# logger.debug("We have a generator")
return [PydReagent(**reagent) for reagent in value]
return value
@@ -631,7 +607,6 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def expand_samples(cls, value):
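# NOTE: same pattern as expand_reagents above: the parser yields generators, which are materialized into pydantic models before field validation.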
if isinstance(value, Generator):
# logger.debug("We have a generator")[
return [PydSample(**sample) for sample in value]
return value
@@ -656,7 +631,6 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("cost_centre")
@classmethod
def get_cost_centre(cls, value, values):
# logger.debug(f"Value coming in for cost_centre: {value}")
match value['value']:
case None:
from backend.db.models import Organization
@@ -671,7 +645,6 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("contact")
@classmethod
def get_contact_from_org(cls, value, values):
# logger.debug(f"Checking on value: {value}")
match value:
case dict():
if isinstance(value['value'], tuple):
@@ -684,7 +657,6 @@ class PydSubmission(BaseModel, extra='allow'):
if check is None:
org = Organization.query(name=values.data['submitting_lab']['value'])
contact = org.contacts[0].name
# logger.debug(f"Pulled: {contact}")
if isinstance(contact, tuple):
contact = contact[0]
return dict(value=contact, missing=True)
@@ -692,7 +664,6 @@ class PydSubmission(BaseModel, extra='allow'):
return value
def __init__(self, run_custom: bool = False, **data):
logger.debug(f"{__name__} input data: {data}")
super().__init__(**data)
# NOTE: this could also be done with default_factory
self.submission_object = BasicSubmission.find_polymorphic_subclass(
@@ -755,13 +726,11 @@ class PydSubmission(BaseModel, extra='allow'):
except TypeError:
pass
else:
# logger.debug("Extracting 'value' from attributes")
output = {k: self.filter_field(k) for k in fields}
return output
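# NOTE: filter_field unwraps the dict(value=..., missing=...) metadata wrappers so callers receive bare values.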
def filter_field(self, key: str):
item = getattr(self, key)
# logger.debug(f"Attempting deconstruction of {key}: {item} with type {type(item)}")
match item:
case dict():
try:
@@ -793,13 +762,10 @@ class PydSubmission(BaseModel, extra='allow'):
"""
report = Report()
dicto = self.improved_dict()
# logger.warning(f"\n\nQuery or create: {self.submission_type['value']}, {self.rsl_plate_num['value']}")
instance, result = BasicSubmission.query_or_create(submission_type=self.submission_type['value'],
rsl_plate_num=self.rsl_plate_num['value'])
logger.debug(f"Result of query or create: {instance}")
report.add_result(result)
self.handle_duplicate_samples()
# logger.debug(f"Here's our list of duplicate removed samples: {self.samples}")
for key, value in dicto.items():
if isinstance(value, dict):
try:
@@ -811,18 +777,13 @@ class PydSubmission(BaseModel, extra='allow'):
continue
if value is None:
continue
# logger.debug(f"Setting {key} to {value}")
match key:
case "reagents":
for reagent in self.reagents:
logger.debug(f"Checking reagent {reagent.lot}")
reagent, _ = reagent.toSQL(submission=instance)
# logger.debug(f"Association: {assoc}")
case "samples":
for sample in self.samples:
sample, associations, _ = sample.toSQL(submission=instance)
# logger.debug(f"Sample SQL object to be added to submission: {sample.__dict__}")
# logger.debug(associations)
for assoc in associations:
if assoc is not None:
if assoc not in instance.submission_sample_associations:
@@ -830,19 +791,16 @@ class PydSubmission(BaseModel, extra='allow'):
else:
logger.warning(f"Sample association {assoc} is already present in {instance}")
case "equipment":
# logger.debug(f"Equipment: {pformat(self.equipment)}")
for equip in self.equipment:
if equip is None:
continue
equip, association = equip.toSQL(submission=instance)
if association is not None:
instance.submission_equipment_associations.append(association)
logger.debug(f"Equipment associations: {instance.submission_equipment_associations}")
case "tips":
for tips in self.tips:
if tips is None:
continue
# logger.debug(f"Converting tips: {tips} to sql.")
try:
association = tips.to_sql(submission=instance)
except AttributeError:
@@ -864,14 +822,11 @@ class PydSubmission(BaseModel, extra='allow'):
value = value
instance.set_attribute(key=key, value=value)
case item if item in instance.jsons():
# logger.debug(f"{item} is a json.")
try:
ii = value.items()
except AttributeError:
ii = {}
logger.debug(f"ii is {ii}, value is {value}")
for k, v in ii:
logger.debug(f"k is {k}, v is {v}")
if isinstance(v, datetime):
value[k] = v.strftime("%Y-%m-%d %H:%M:%S")
else:
@@ -893,21 +848,17 @@ class PydSubmission(BaseModel, extra='allow'):
else:
logger.warning(f"{key} already == {value} so no updating.")
try:
# logger.debug(f"Calculating costs for procedure...")
instance.calculate_base_cost()
except (TypeError, AttributeError) as e:
logger.debug(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using 0.")
logger.error(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using 0.")
try:
instance.run_cost = instance.extraction_kit.cost_per_run
except AttributeError:
instance.run_cost = 0
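# NOTE: kits without a cost_per_run fall back to a zero run cost rather than aborting the save.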
# logger.debug(f"Calculated base run cost of: {instance.run_cost}")
# NOTE: Apply any discounts that are applicable for client and kit.
try:
# logger.debug("Checking and applying discounts...")
discounts = [item.amount for item in
Discount.query(kit_type=instance.extraction_kit, organization=instance.submitting_lab)]
# logger.debug(f"We got discounts: {discounts}")
if len(discounts) > 0:
instance.run_cost = instance.run_cost - sum(discounts)
except Exception as e:
@@ -925,7 +876,6 @@ class PydSubmission(BaseModel, extra='allow'):
SubmissionFormWidget: Submission form widget
"""
from frontend.widgets.submission_widget import SubmissionFormWidget
# logger.debug(f"Disable: {disable}")
return SubmissionFormWidget(parent=parent, submission=self, disable=disable)
def to_writer(self) -> "SheetWriter":
@@ -946,10 +896,8 @@ class PydSubmission(BaseModel, extra='allow'):
str: Output filename
"""
template = self.submission_object.filename_template()
# logger.debug(f"Using template string: {template}")
render = self.namer.construct_export_name(template=template, **self.improved_dict(dictionaries=False)).replace(
"/", "")
# logger.debug(f"Template rendered as: {render}")
return render
# @report_result
@@ -964,26 +912,20 @@ class PydSubmission(BaseModel, extra='allow'):
Report: Result object containing a message and any missing components.
"""
report = Report()
# logger.debug(f"Extraction kit: {extraction_kit}. Is it a string? {isinstance(extraction_kit, str)}")
if isinstance(extraction_kit, str):
extraction_kit = dict(value=extraction_kit)
if extraction_kit is not None and extraction_kit != self.extraction_kit['value']:
self.extraction_kit['value'] = extraction_kit['value']
# logger.debug(f"Looking up {self.extraction_kit['value']}")
ext_kit = KitType.query(name=self.extraction_kit['value'])
ext_kit_rtypes = [item.to_pydantic() for item in
ext_kit.get_reagents(required=True, submission_type=self.submission_type['value'])]
# logger.debug(f"Kit reagents: {ext_kit_rtypes}")
# logger.debug(f"Submission reagents: {self.reagents}")
# NOTE: Exclude any reagenttype found in this pyd not expected in kit.
expected_check = [item.role for item in ext_kit_rtypes]
output_reagents = [rt for rt in self.reagents if rt.role in expected_check]
logger.debug(f"Already have these reagent types: {output_reagents}")
missing_check = [item.role for item in output_reagents]
missing_reagents = [rt for rt in ext_kit_rtypes if rt.role not in missing_check]
missing_reagents += [rt for rt in output_reagents if rt.missing]
output_reagents += [rt for rt in missing_reagents if rt not in output_reagents]
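# NOTE: reconciliation: keep submission reagents whose role the kit expects, then add kit roles not yet
# covered plus any reagents flagged missing, so the missing set can presumably be prompted for downstream.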
# logger.debug(f"Missing reagents types: {missing_reagents}")
# NOTE: if lists are equal return no problem
if len(missing_reagents) == 0:
result = None
@@ -1072,7 +1014,6 @@ class PydReagentRole(BaseModel):
instance: ReagentRole = ReagentRole.query(name=self.name)
if instance is None:
instance = ReagentRole(name=self.name, eol_ext=self.eol_ext)
# logger.debug(f"This is the reagent type instance: {instance.__dict__}")
try:
assoc = KitTypeReagentRoleAssociation.query(reagent_role=instance, kit_type=kit)
except StatementError: