During code cleanup
@@ -3,7 +3,6 @@ Contains all models for sqlalchemy
'''
from __future__ import annotations
import sys, logging

from sqlalchemy import Column, INTEGER, String, JSON
from sqlalchemy.orm import DeclarativeMeta, declarative_base, Query, Session
from sqlalchemy.ext.declarative import declared_attr
@@ -81,7 +80,13 @@ class BaseClass(Base):
return ctx.backup_path

@classmethod
-def get_default_info(cls, *args) -> dict | List[str]:
+def get_default_info(cls, *args) -> dict | list | str:
+"""
+Returns default info for a model
+
+Returns:
+dict | list | str: Output of key:value dict or single (list, str) desired variable
+"""
dicto = dict(singles=['id'])
output = {}
for k, v in dicto.items():
@@ -95,7 +100,13 @@ class BaseClass(Base):
return output

@classmethod
-def query(cls, **kwargs):
+def query(cls, **kwargs) -> Any | List[Any]:
+"""
+Default query function for models
+
+Returns:
+Any | List[Any]: Result of query execution.
+"""
return cls.execute_query(**kwargs)

@classmethod
@@ -119,13 +130,12 @@ class BaseClass(Base):
singles = model.get_default_info('singles')
-logger.debug(f"Querying: {model}, with kwargs: {kwargs}")
for k, v in kwargs.items():
-logger.debug(f"Using key: {k} with value: {v}")
# logger.debug(f"That key found attribute: {attr} with type: {attr}")
+# logger.debug(f"Using key: {k} with value: {v}")
try:
attr = getattr(model, k)
query = query.filter(attr == v)
except (ArgumentError, AttributeError) as e:
-logger.error(f"Attribute {k} available due to:\n\t{e}\nSkipping.")
+logger.error(f"Attribute {k} unavailable due to:\n\t{e}\nSkipping.")
if k in singles:
limit = 1
with query.session.no_autoflush:
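The hunk above trims logging around the dynamic filter loop, but the pattern itself is worth noting: each keyword argument is resolved to a mapped attribute with getattr and appended as a filter, and unknown attributes are skipped rather than aborting the query. A minimal, self-contained sketch of that pattern, using an illustrative model and session rather than this project's classes:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.exc import ArgumentError
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)
    name = Column(String(32))

def query_items(session: Session, **kwargs):
    # Build the query one filter at a time; skip kwargs that don't
    # correspond to a mapped attribute instead of failing outright.
    query = session.query(Item)
    for k, v in kwargs.items():
        try:
            attr = getattr(Item, k)
            query = query.filter(attr == v)
        except (ArgumentError, AttributeError):
            continue  # unknown attribute: skip it, as the model code does
    return query.all()

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Item(name="widget"))
    session.commit()
    print(query_items(session, name="widget", bogus=1))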
@@ -155,6 +165,9 @@ class BaseClass(Base):


class ConfigItem(BaseClass):
+"""
+Key:JSON objects to store config settings in database.
+"""
id = Column(INTEGER, primary_key=True)
key = Column(String(32))
value = Column(JSON)
@@ -163,8 +176,18 @@ class ConfigItem(BaseClass):
return f"ConfigItem({self.key} : {self.value})"

@classmethod
-def get_config_items(cls):
-return cls.__database_session__.query(cls).all()
+def get_config_items(cls, *args) -> ConfigItem|List[ConfigItem]:
+"""
+Get desired config items from database
+
+Returns:
+ConfigItem|List[ConfigItem]: Config item(s)
+"""
+config_items = cls.__database_session__.query(cls).all()
+config_items = [item for item in config_items if item.key in args]
+if len(args) == 1:
+config_items = config_items[0]
+return config_items


from .controls import *

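With the new signature, get_config_items filters the fetched rows down to the key names passed in, and unwraps the list when exactly one key was requested. A hedged usage sketch (the key names here are invented):

# Several keys -> a list of matching ConfigItem rows:
items = ConfigItem.get_config_items("backup_path", "power_users")

# Exactly one key -> the bare ConfigItem rather than a one-element list:
item = ConfigItem.get_config_items("backup_path")
print(item.value)

# NOTE (assumption): if the single key is absent from the table, the
# filtered list is empty and config_items[0] raises IndexError, so
# callers presumably only ask for keys known to exist.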
@@ -13,6 +13,8 @@ from typing import List, Literal
from pandas import ExcelFile
from pathlib import Path
from . import Base, BaseClass, Organization
+from io import BytesIO


logger = logging.getLogger(f'submissions.{__name__}')

@@ -300,7 +302,7 @@ class ReagentType(BaseClass):
# logger.debug(f"Looking up reagent type for {type(kit_type)} {kit_type} and {type(reagent)} {reagent}")
# logger.debug(f"Kit reagent types: {kit_type.reagent_types}")
result = list(set(kit_type.reagent_types).intersection(reagent.type))
-logger.debug(f"Result: {result}")
+# logger.debug(f"Result: {result}")
try:
return result[0]
except IndexError:
@@ -386,7 +388,7 @@ class Reagent(BaseClass):
place_holder = self.expiry + reagent_role.eol_ext
except (TypeError, AttributeError) as e:
place_holder = date.today()
-logger.debug(f"We got a type error setting {self.lot} expiry: {e}. setting to today for testing")
+logger.error(f"We got a type error setting {self.lot} expiry: {e}. setting to today for testing")
if self.expiry.year == 1970:
place_holder = "NA"
else:
@@ -410,14 +412,14 @@ class Reagent(BaseClass):
Report: Result of operation
"""
report = Report()
-logger.debug(f"Attempting update of reagent type at intersection of ({self}), ({kit})")
+# logger.debug(f"Attempting update of last used reagent type at intersection of ({self}), ({kit})")
rt = ReagentType.query(kit_type=kit, reagent=self, limit=1)
if rt is not None:
-logger.debug(f"got reagenttype {rt}")
+# logger.debug(f"got reagenttype {rt}")
assoc = KitTypeReagentTypeAssociation.query(kit_type=kit, reagent_type=rt)
if assoc is not None:
if assoc.last_used != self.lot:
-logger.debug(f"Updating {assoc} last used to {self.lot}")
+# logger.debug(f"Updating {assoc} last used to {self.lot}")
assoc.last_used = self.lot
result = assoc.save()
report.add_result(result)
@@ -607,7 +609,7 @@ class SubmissionType(BaseClass):
Returns:
List[str]: List of sheet names
"""
-return ExcelFile(self.template_file).sheet_names
+return ExcelFile(BytesIO(self.template_file)).sheet_names

def set_template_file(self, filepath: Path | str):
"""
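The one-line fix above matters because template_file evidently holds the workbook as raw bytes; pandas.ExcelFile expects a path or a file-like object, so the bytes must be wrapped in BytesIO first. A minimal sketch of the same round trip, assuming the bytes contain a valid .xlsx workbook:

from io import BytesIO
from pandas import ExcelFile
from pathlib import Path

blob = Path("template.xlsx").read_bytes()  # hypothetical file on disk

# ExcelFile(blob) would choke on raw bytes; BytesIO supplies the
# file-like interface pandas expects.
print(ExcelFile(BytesIO(blob)).sheet_names)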
@@ -633,7 +635,7 @@ class SubmissionType(BaseClass):

def construct_info_map(self, mode: Literal['read', 'write']) -> dict:
info = self.info_map
-logger.debug(f"Info map: {info}")
+# logger.debug(f"Info map: {info}")
output = {}
# for k,v in info.items():
# info[k]['write'] += info[k]['read']
@@ -956,7 +958,7 @@ class SubmissionReagentAssociation(BaseClass):
Returns:
str: Representation of this SubmissionReagentAssociation
"""
-return f"<{self.submission.rsl_plate_num}&{self.reagent.lot}>"
+return f"<{self.submission.rsl_plate_num} & {self.reagent.lot}>"

def __init__(self, reagent=None, submission=None):
if isinstance(reagent, list):

@@ -226,7 +226,7 @@ class BasicSubmission(BaseClass):
if report:
return output
if full_data:
-logger.debug(f"Attempting reagents.")
+# logger.debug(f"Attempting reagents.")
try:
reagents = [item.to_sub_dict(extraction_kit=self.extraction_kit) for item in
self.submission_reagent_associations]
@@ -240,9 +240,9 @@ class BasicSubmission(BaseClass):
except Exception as e:
logger.error(f"We got an error retrieving reagents: {e}")
reagents = None
-logger.debug(f"Running samples.")
+# logger.debug(f"Running samples.")
samples = self.adjust_to_dict_samples(backup=backup)
-logger.debug("Running equipment")
+# logger.debug("Running equipment")
try:
equipment = [item.to_sub_dict() for item in self.submission_equipment_associations]
if len(equipment) == 0:
@@ -297,7 +297,7 @@ class BasicSubmission(BaseClass):
# Get kit associated with this submission
assoc = [item for item in self.extraction_kit.kit_submissiontype_associations if
item.submission_type == self.submission_type][0]
-logger.debug(f"Came up with association: {assoc}")
+# logger.debug(f"Came up with association: {assoc}")
# If every individual cost is 0 this is probably an old plate.
if all(item == 0.0 for item in [assoc.constant_cost, assoc.mutable_cost_column, assoc.mutable_cost_sample]):
try:
@@ -369,7 +369,7 @@ class BasicSubmission(BaseClass):
return [item.role for item in self.submission_equipment_associations]

@classmethod
-def submissions_to_df(cls, submission_type: str | None = None, limit: int = 0) -> pd.DataFrame:
+def submissions_to_df(cls, submission_type: str | None = None, limit: int = 0, chronologic:bool=True) -> pd.DataFrame:
"""
Convert all submissions to dataframe

@@ -380,11 +380,11 @@ class BasicSubmission(BaseClass):
Returns:
pd.DataFrame: Pandas Dataframe of all relevant submissions
"""
-logger.debug(f"Querying Type: {submission_type}")
-logger.debug(f"Using limit: {limit}")
+# logger.debug(f"Querying Type: {submission_type}")
+# logger.debug(f"Using limit: {limit}")
# use lookup function to create list of dicts
-subs = [item.to_dict() for item in cls.query(submission_type=submission_type, limit=limit)]
-logger.debug(f"Got {len(subs)} submissions.")
+subs = [item.to_dict() for item in cls.query(submission_type=submission_type, limit=limit, chronologic=chronologic)]
+# logger.debug(f"Got {len(subs)} submissions.")
df = pd.DataFrame.from_records(subs)
# Exclude sub information
for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents',
@@ -393,6 +393,8 @@ class BasicSubmission(BaseClass):
df = df.drop(item, axis=1)
except:
logger.warning(f"Couldn't drop '{item}' column from submissionsheet df.")
+if chronologic:
+df.sort_values(by="Submitted Date", axis=0, inplace=True, ascending=False)
return df

def set_attribute(self, key: str, value):
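The new chronologic flag is passed through to the query and then applied at the dataframe level with sort_values. A small standalone illustration of that last step (the records are made up; the column name matches the hunk):

import pandas as pd

subs = [
    {"Submitted Date": "2024-01-15", "Plate": "RSL-0002"},
    {"Submitted Date": "2024-03-02", "Plate": "RSL-0003"},
    {"Submitted Date": "2023-11-30", "Plate": "RSL-0001"},
]
df = pd.DataFrame.from_records(subs)

chronologic = True
if chronologic:
    # Newest first, mirroring ascending=False in the hunk above.
    df.sort_values(by="Submitted Date", axis=0, inplace=True, ascending=False)
print(df)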
@@ -466,7 +468,10 @@ class BasicSubmission(BaseClass):
flag_modified(self, key)
return
case _:
-field_value = value
+try:
+field_value = value.strip()
+except AttributeError:
+field_value = value
# insert into field
try:
self.__setattr__(key, field_value)
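The replacement block above is the EAFP idiom: instead of testing isinstance(value, str), it simply attempts value.strip() and falls back to the raw value when the method is missing. In isolation:

def clean(value):
    # Non-strings (int, None, date, ...) have no .strip() and
    # raise AttributeError, so they pass through unchanged.
    try:
        return value.strip()
    except AttributeError:
        return value

print(clean("  RSL-0001  "))  # 'RSL-0001'
print(clean(42))              # 42
print(clean(None))            # None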
@@ -502,7 +507,7 @@ class BasicSubmission(BaseClass):
"""
from backend.validators import PydSubmission, PydSample, PydReagent, PydEquipment
dicto = self.to_dict(full_data=True, backup=backup)
-logger.debug("To dict complete")
+# logger.debug("To dict complete")
new_dict = {}
for key, value in dicto.items():
# start = time()
@@ -526,11 +531,11 @@ class BasicSubmission(BaseClass):
case "id":
pass
case _:
-logger.debug(f"Setting dict {key} to {value}")
+# logger.debug(f"Setting dict {key} to {value}")
new_dict[key.lower().replace(" ", "_")] = dict(value=value, missing=missing)
# logger.debug(f"{key} complete after {time()-start}")
new_dict['filepath'] = Path(tempfile.TemporaryFile().name)
-logger.debug("Done converting fields.")
+# logger.debug("Done converting fields.")
return PydSubmission(**new_dict)

def save(self, original: bool = True):
@@ -604,20 +609,20 @@ class BasicSubmission(BaseClass):

# Child class custom functions

-@classmethod
-def custom_platemap(cls, xl: pd.ExcelFile, plate_map: pd.DataFrame) -> pd.DataFrame:
-"""
-Stupid stopgap solution to there being an issue with the Bacterial Culture plate map
-
-Args:
-xl (pd.ExcelFile): original xl workbook, used for child classes mostly
-plate_map (pd.DataFrame): original plate map
-
-Returns:
-pd.DataFrame: updated plate map.
-"""
-logger.info(f"Calling {cls.__mapper_args__['polymorphic_identity']} plate mapper.")
-return plate_map
+# @classmethod
+# def custom_platemap(cls, xl: pd.ExcelFile, plate_map: pd.DataFrame) -> pd.DataFrame:
+# """
+# Stupid stopgap solution to there being an issue with the Bacterial Culture plate map
+#
+# Args:
+# xl (pd.ExcelFile): original xl workbook, used for child classes mostly
+# plate_map (pd.DataFrame): original plate map
+#
+# Returns:
+# pd.DataFrame: updated plate map.
+# """
+# logger.info(f"Calling {cls.__mapper_args__['polymorphic_identity']} plate mapper.")
+# return plate_map

@classmethod
def custom_info_parser(cls, input_dict: dict, xl: Workbook | None = None) -> dict:
@@ -696,15 +701,15 @@ class BasicSubmission(BaseClass):
# logger.info(f"Hello from {cls.__mapper_args__['polymorphic_identity']} Enforcer!")
# return instr
from backend.validators import RSLNamer
-logger.debug(f"instr coming into {cls}: {instr}")
-logger.debug(f"data coming into {cls}: {data}")
+# logger.debug(f"instr coming into {cls}: {instr}")
+# logger.debug(f"data coming into {cls}: {data}")
defaults = cls.get_default_info("abbreviation", "submission_type")
data['abbreviation'] = defaults['abbreviation']
if 'submission_type' not in data.keys() or data['submission_type'] in [None, ""]:
data['submission_type'] = defaults['submission_type']
# outstr = super().enforce_name(instr=instr, data=data)
if instr in [None, ""]:
-logger.debug("Sending to RSLNamer to make new plate name.")
+# logger.debug("Sending to RSLNamer to make new plate name.")
outstr = RSLNamer.construct_new_plate_name(data=data)
else:
outstr = instr
@@ -761,9 +766,9 @@ class BasicSubmission(BaseClass):
Returns:
list: _description_
"""
-logger.debug(f"Hello from {cls.__mapper_args__['polymorphic_identity']} PCR parser!")
+# logger.debug(f"Hello from {cls.__mapper_args__['polymorphic_identity']} PCR parser!")
pcr_sample_map = cls.get_submission_type().sample_map['pcr_samples']
-logger.debug(f'sample map: {pcr_sample_map}')
+# logger.debug(f'sample map: {pcr_sample_map}')
main_sheet = xl[pcr_sample_map['main_sheet']]
samples = []
fields = {k: v for k, v in pcr_sample_map.items() if k not in ['main_sheet', 'start_row']}
@@ -816,7 +821,7 @@ class BasicSubmission(BaseClass):
Returns:
List[dict]: Updated dictionaries
"""
-logger.debug(f"Hello from {self.__class__.__name__} dictionary sample adjuster.")
+# logger.debug(f"Hello from {self.__class__.__name__} dictionary sample adjuster.")
return [item.to_sub_dict() for item in self.submission_sample_associations]

@classmethod
@@ -833,7 +838,7 @@ class BasicSubmission(BaseClass):
base_dict['excluded'] = cls.get_default_info('details_ignore')
env = jinja_template_loading()
temp_name = f"{cls.__name__.lower()}_details.html"
-logger.debug(f"Returning template: {temp_name}")
+# logger.debug(f"Returning template: {temp_name}")
try:
template = env.get_template(temp_name)
except TemplateNotFound as e:
@@ -872,7 +877,7 @@ class BasicSubmission(BaseClass):
Returns:
models.BasicSubmission | List[models.BasicSubmission]: Submission(s) of interest
"""
-logger.debug(f"Incoming kwargs: {kwargs}")
+# logger.debug(f"Incoming kwargs: {kwargs}")
# NOTE: if you go back to using 'model' change the appropriate cls to model in the query filters
if submission_type is not None:
# if isinstance(submission_type, SubmissionType):
@@ -882,7 +887,7 @@ class BasicSubmission(BaseClass):
# model = cls.find_subclasses(submission_type=submission_type)
elif len(kwargs) > 0:
# find the subclass containing the relevant attributes
-logger.debug(f"Attributes for search: {kwargs}")
+# logger.debug(f"Attributes for search: {kwargs}")
# model = cls.find_subclasses(attrs=kwargs)
model = cls.find_polymorphic_subclass(attrs=kwargs)
else:
@@ -895,7 +900,7 @@ class BasicSubmission(BaseClass):
logger.warning(f"End date with no start date, using Jan 1, 2023")
start_date = date(2023, 1, 1)
if start_date is not None:
-logger.debug(f"Querying with start date: {start_date} and end date: {end_date}")
+# logger.debug(f"Querying with start date: {start_date} and end date: {end_date}")
match start_date:
case date():
# logger.debug(f"Lookup BasicSubmission by start_date({start_date})")
@@ -919,7 +924,7 @@ class BasicSubmission(BaseClass):
# logger.debug(f"Lookup BasicSubmission by parsed str end_date {end_date}")
end_date = parse(end_date).strftime("%Y-%m-%d")
# logger.debug(f"Looking up BasicSubmissions from start date: {start_date} and end date: {end_date}")
-logger.debug(f"Start date {start_date} == End date {end_date}: {start_date == end_date}")
+# logger.debug(f"Start date {start_date} == End date {end_date}: {start_date == end_date}")
# logger.debug(f"Compensating for same date by using time")
if start_date == end_date:
start_date = datetime.strptime(start_date, "%Y-%m-%d").strftime("%Y-%m-%d %H:%M:%S.%f")
@@ -999,7 +1004,7 @@ class BasicSubmission(BaseClass):
f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects. Use .query() instead.")
instance = cls.query(submission_type=submission_type, limit=1, **kwargs)
# logger.debug(f"Retrieved instance: {instance}")
-if instance == None:
+if instance is None:
used_class = cls.find_polymorphic_subclass(attrs=kwargs, polymorphic_identity=submission_type)
instance = used_class(**kwargs)
match submission_type:
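`instance is None` is the correct spelling here: instance is an ORM object or None, and identity comparison sidesteps any custom __eq__ on the class. The one place `== None` is deliberate in SQLAlchemy code is inside filter expressions, where the column overloads == to render SQL IS NULL:

instance = None
if instance is None:          # preferred for plain Python values
    pass

# In a filter, '== None' (or better, .is_(None)) is intentional:
# session.query(Model).filter(Model.finished_date == None)
# session.query(Model).filter(Model.finished_date.is_(None))
# both render: WHERE finished_date IS NULL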
@@ -1041,11 +1046,11 @@ class BasicSubmission(BaseClass):
e: _description_
"""
from frontend.widgets.pop_ups import QuestionAsker
-logger.debug("Hello from delete")
+# logger.debug("Hello from delete")
fname = self.__backup_path__.joinpath(f"{self.rsl_plate_num}-backup({date.today().strftime('%Y%m%d')})")
msg = QuestionAsker(title="Delete?", message=f"Are you sure you want to delete {self.rsl_plate_num}?\n")
if msg.exec():
-# self.backup(fname=fname, full_backup=True)
+self.backup(fname=fname, full_backup=True)
self.__database_session__.delete(self)
try:
self.__database_session__.commit()
@@ -1061,7 +1066,7 @@ class BasicSubmission(BaseClass):
Args:
obj (_type_): parent widget
"""
-logger.debug("Hello from details")
+# logger.debug("Hello from details")
from frontend.widgets.submission_details import SubmissionDetails
dlg = SubmissionDetails(parent=obj, sub=self)
if dlg.exec():
@@ -1070,7 +1075,7 @@ class BasicSubmission(BaseClass):
def edit(self, obj):
from frontend.widgets.submission_widget import SubmissionFormWidget
for widg in obj.app.table_widget.formwidget.findChildren(SubmissionFormWidget):
-logger.debug(widg)
+# logger.debug(widg)
widg.setParent(None)
pyd = self.to_pydantic(backup=True)
form = pyd.to_form(parent=obj)
@@ -1088,7 +1093,7 @@ class BasicSubmission(BaseClass):
if dlg.exec():
comment = dlg.parse_form()
self.set_attribute(key='comment', value=comment)
-logger.debug(self.comment)
+# logger.debug(self.comment)
self.save(original=False)

def add_equipment(self, obj):
@@ -1102,11 +1107,11 @@ class BasicSubmission(BaseClass):
dlg = EquipmentUsage(parent=obj, submission=self)
if dlg.exec():
equipment = dlg.parse_form()
-logger.debug(f"We've got equipment: {equipment}")
+# logger.debug(f"We've got equipment: {equipment}")
for equip in equipment:
-logger.debug(f"Processing: {equip}")
+# logger.debug(f"Processing: {equip}")
_, assoc = equip.toSQL(submission=self)
-logger.debug(f"Appending SubmissionEquipmentAssociation: {assoc}")
+# logger.debug(f"Appending SubmissionEquipmentAssociation: {assoc}")
assoc.save()
else:
pass
@@ -1120,14 +1125,14 @@ class BasicSubmission(BaseClass):
fname (Path | None, optional): Filename of xlsx file. Defaults to None.
full_backup (bool, optional): Whether or not to make yaml file. Defaults to False.
"""
-logger.debug("Hello from backup.")
+# logger.debug("Hello from backup.")
pyd = self.to_pydantic(backup=True)
-if fname == None:
+if fname is None:
from frontend.widgets.functions import select_save_file
fname = select_save_file(default_name=pyd.construct_filename(), extension="xlsx", obj=obj)
-logger.debug(fname.name)
+# logger.debug(fname.name)
if fname.name == "":
-logger.debug(f"export cancelled.")
+# logger.debug(f"export cancelled.")
return
# pyd.filepath = fname
if full_backup:
@@ -1171,28 +1176,28 @@ class BacterialCulture(BasicSubmission):
output['controls'] = [item.to_sub_dict() for item in self.controls]
return output

-@classmethod
-def custom_platemap(cls, xl: pd.ExcelFile, plate_map: pd.DataFrame) -> pd.DataFrame:
-"""
-Stupid stopgap solution to there being an issue with the Bacterial Culture plate map. Extends parent.
-
-Args:
-xl (pd.ExcelFile): original xl workbook
-plate_map (pd.DataFrame): original plate map
-
-Returns:
-pd.DataFrame: updated plate map.
-"""
-plate_map = super().custom_platemap(xl, plate_map)
-num1 = xl.parse("Sample List").iloc[40, 1]
-num2 = xl.parse("Sample List").iloc[41, 1]
-logger.debug(f"Broken: {plate_map.iloc[5, 0]}, {plate_map.iloc[6, 0]}")
-logger.debug(f"Replace: {num1}, {num2}")
-if not check_not_nan(plate_map.iloc[5, 0]):
-plate_map.iloc[5, 0] = num1
-if not check_not_nan(plate_map.iloc[6, 0]):
-plate_map.iloc[6, 0] = num2
-return plate_map
+# @classmethod
+# def custom_platemap(cls, xl: pd.ExcelFile, plate_map: pd.DataFrame) -> pd.DataFrame:
+# """
+# Stupid stopgap solution to there being an issue with the Bacterial Culture plate map. Extends parent.
+#
+# Args:
+# xl (pd.ExcelFile): original xl workbook
+# plate_map (pd.DataFrame): original plate map
+#
+# Returns:
+# pd.DataFrame: updated plate map.
+# """
+# plate_map = super().custom_platemap(xl, plate_map)
+# num1 = xl.parse("Sample List").iloc[40, 1]
+# num2 = xl.parse("Sample List").iloc[41, 1]
+# # logger.debug(f"Broken: {plate_map.iloc[5, 0]}, {plate_map.iloc[6, 0]}")
+# # logger.debug(f"Replace: {num1}, {num2}")
+# if not check_not_nan(plate_map.iloc[5, 0]):
+# plate_map.iloc[5, 0] = num1
+# if not check_not_nan(plate_map.iloc[6, 0]):
+# plate_map.iloc[6, 0] = num2
+# return plate_map

# @classmethod
# def custom_writer(cls, input_excel: Workbook, info: dict | None = None, backup: bool = False) -> Workbook:
@@ -1255,7 +1260,7 @@ class BacterialCulture(BasicSubmission):
for sample in input_dict['samples']:
matched = regex.match(sample['submitter_id'])
if bool(matched):
-logger.debug(f"Control match found: {sample['submitter_id']}")
+# logger.debug(f"Control match found: {sample['submitter_id']}")
new_lot = matched.group()
try:
pos_control_reg = \
@@ -1272,8 +1277,8 @@ class BacterialCulture(BasicSubmission):
"""
Extends parent
"""
-logger.debug(f"Checking {sample.well}")
-logger.debug(f"here's the worksheet: {worksheet}")
+# logger.debug(f"Checking {sample.well}")
+# logger.debug(f"here's the worksheet: {worksheet}")
row = super().custom_sample_autofill_row(sample, worksheet)
df = pd.DataFrame(list(worksheet.values))
# logger.debug(f"Here's the dataframe: {df}")
@@ -1282,7 +1287,7 @@ class BacterialCulture(BasicSubmission):
new = f"{sample.well[0]}{sample.well[1:].zfill(2)}"
logger.debug(f"Checking: {new}")
idx = df[df[0] == new]
-logger.debug(f"Here is the row: {idx}")
+# logger.debug(f"Here is the row: {idx}")
row = idx.index.to_list()[0]
return row + 1

@@ -1386,14 +1391,19 @@ class Wastewater(BasicSubmission):
Parse specific to wastewater samples.
"""
samples = super().parse_pcr(xl=xl, rsl_plate_num=rsl_plate_num)
-logger.debug(f'Samples from parent pcr parser: {pformat(samples)}')
+# logger.debug(f'Samples from parent pcr parser: {pformat(samples)}')
output = []
for sample in samples:
+# NOTE: remove '-{target}' from controls
sample['sample'] = re.sub('-N\\d$', '', sample['sample'])
+# NOTE: if sample is already in output skip
if sample['sample'] in [item['sample'] for item in output]:
continue
+# NOTE: Set ct values
sample[f"ct_{sample['target'].lower()}"] = sample['ct'] if isinstance(sample['ct'], float) else 0.0
+# NOTE: Set assessment
sample[f"{sample['target'].lower()}_status"] = sample['assessment']
+# NOTE: Get sample having other target
other_targets = [s for s in samples if re.sub('-N\\d$', '', s['sample']) == sample['sample']]
for s in other_targets:
sample[f"ct_{s['target'].lower()}"] = s['ct'] if isinstance(s['ct'], float) else 0.0
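The newly annotated loop collapses one-row-per-target PCR output into one dict per sample: the trailing -N1/-N2 target suffix is stripped, duplicates are skipped, and every target's ct value lands in its own ct_<target> key. A compact sketch of the merge with invented data:

import re

rows = [
    {"sample": "EN1-20240101-N1", "target": "N1", "ct": 21.3, "assessment": "Positive"},
    {"sample": "EN1-20240101-N2", "target": "N2", "ct": 22.8, "assessment": "Positive"},
]

output = []
for sample in [dict(r) for r in rows]:
    sample["sample"] = re.sub(r"-N\d$", "", sample["sample"])
    if sample["sample"] in [item["sample"] for item in output]:
        continue  # already merged via a sibling row
    sample[f"ct_{sample['target'].lower()}"] = sample["ct"] if isinstance(sample["ct"], float) else 0.0
    sample[f"{sample['target'].lower()}_status"] = sample["assessment"]
    # fold in the ct values carried by the other-target rows
    for s in rows:
        if re.sub(r"-N\d$", "", s["sample"]) == sample["sample"]:
            sample[f"ct_{s['target'].lower()}"] = s["ct"] if isinstance(s["ct"], float) else 0.0
    output.append(sample)

print(output)  # one record carrying both ct_n1 and ct_n2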
@@ -1446,13 +1456,13 @@ class Wastewater(BasicSubmission):
"""
Extends parent
"""
-logger.debug(f"Checking {sample.well}")
-logger.debug(f"here's the worksheet: {worksheet}")
+# logger.debug(f"Checking {sample.well}")
+# logger.debug(f"here's the worksheet: {worksheet}")
row = super().custom_sample_autofill_row(sample, worksheet)
df = pd.DataFrame(list(worksheet.values))
-logger.debug(f"Here's the dataframe: {df}")
+# logger.debug(f"Here's the dataframe: {df}")
idx = df[df[1] == sample.sample_location]
-logger.debug(f"Here is the row: {idx}")
+# logger.debug(f"Here is the row: {idx}")
row = idx.index.to_list()[0]
return row + 1

@@ -1468,10 +1478,10 @@ class Wastewater(BasicSubmission):
parser = PCRParser(filepath=fname)
self.set_attribute("pcr_info", parser.pcr)
self.save(original=False)
-logger.debug(f"Got {len(parser.samples)} samples to update!")
-logger.debug(f"Parser samples: {parser.samples}")
+# logger.debug(f"Got {len(parser.samples)} samples to update!")
+# logger.debug(f"Parser samples: {parser.samples}")
for sample in self.samples:
-logger.debug(f"Running update on: {sample}")
+# logger.debug(f"Running update on: {sample}")
try:
sample_dict = [item for item in parser.samples if item['sample'] == sample.rsl_number][0]
except IndexError:
@@ -1551,19 +1561,18 @@ class WastewaterArtic(BasicSubmission):
Extends parent
"""
try:
-# Deal with PCR file.
+# NOTE: Deal with PCR file.
instr = re.sub(r"Artic", "", instr, flags=re.IGNORECASE)
except (AttributeError, TypeError) as e:
logger.error(f"Problem using regex: {e}")
-logger.debug(f"Before RSL addition: {instr}")
+# logger.debug(f"Before RSL addition: {instr}")
try:
instr = instr.replace("-", "")
except AttributeError:
instr = date.today().strftime("%Y%m%d")
instr = re.sub(r"^(\d{6})", f"RSL-AR-\\1", instr)
-logger.debug(f"name coming out of Artic namer: {instr}")
+# logger.debug(f"name coming out of Artic namer: {instr}")
outstr = super().enforce_name(instr=instr, data=data)
-
return outstr

@classmethod
@@ -1611,39 +1620,39 @@ class WastewaterArtic(BasicSubmission):
Returns:
str: output name
"""
-logger.debug(f"input string raw: {input_str}")
+# logger.debug(f"input string raw: {input_str}")
# Remove letters.
processed = input_str.replace("RSL", "")
processed = re.sub(r"\(.*\)$", "", processed).strip()
processed = re.sub(r"[A-QS-Z]+\d*", "", processed)
# Remove trailing '-' if any
processed = processed.strip("-")
-logger.debug(f"Processed after stripping letters: {processed}")
+# logger.debug(f"Processed after stripping letters: {processed}")
try:
en_num = re.search(r"\-\d{1}$", processed).group()
processed = rreplace(processed, en_num, "")
except AttributeError:
en_num = "1"
en_num = en_num.strip("-")
-logger.debug(f"Processed after en_num: {processed}")
+# logger.debug(f"Processed after en_num: {processed}")
try:
plate_num = re.search(r"\-\d{1}R?\d?$", processed).group()
processed = rreplace(processed, plate_num, "")
except AttributeError:
plate_num = "1"
plate_num = plate_num.strip("-")
-logger.debug(f"Processed after plate-num: {processed}")
+# logger.debug(f"Processed after plate-num: {processed}")
day = re.search(r"\d{2}$", processed).group()
processed = rreplace(processed, day, "")
-logger.debug(f"Processed after day: {processed}")
+# logger.debug(f"Processed after day: {processed}")
month = re.search(r"\d{2}$", processed).group()
processed = rreplace(processed, month, "")
processed = processed.replace("--", "")
-logger.debug(f"Processed after month: {processed}")
+# logger.debug(f"Processed after month: {processed}")
year = re.search(r'^(?:\d{2})?\d{2}', processed).group()
year = f"20{year}"
final_en_name = f"EN{en_num}-{year}{month}{day}"
-logger.debug(f"Final EN name: {final_en_name}")
+# logger.debug(f"Final EN name: {final_en_name}")
return final_en_name

@classmethod
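This namer parses the plate name right to left: the EN number, then day, then month are peeled off the tail with $-anchored searches, each removed with rreplace so only the trailing occurrence is consumed. rreplace lives in the project's tools module; the implementation below is an assumption for illustration, not the project's code:

import re

def rreplace(s: str, old: str, new: str, count: int = 1) -> str:
    # Assumed helper: replace from the right-hand end of the string.
    return new.join(s.rsplit(old, count))

# Suppose the letter-stripping steps above have reduced the input to:
processed = "240315-2"                                  # yymmdd-en_num
en_num = re.search(r"\-\d{1}$", processed).group()      # '-2'
processed = rreplace(processed, en_num, "")             # '240315'
day = re.search(r"\d{2}$", processed).group()           # '15'
processed = rreplace(processed, day, "")                # '2403'
month = re.search(r"\d{2}$", processed).group()         # '03'
processed = rreplace(processed, month, "")              # '24'
year = f"20{re.search(r'^(?:\d{2})?\d{2}', processed).group()}"
print(f"EN{en_num.strip('-')}-{year}{month}{day}")      # EN2-20240315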
@@ -1657,14 +1666,14 @@ class WastewaterArtic(BasicSubmission):
Returns:
str: output name
"""
-logger.debug(f"input string raw: {input_str}")
+# logger.debug(f"input string raw: {input_str}")
# Remove letters.
processed = input_str.replace("RSL", "")
processed = re.sub(r"\(.*\)$", "", processed).strip()
processed = re.sub(r"[A-QS-Z]+\d*", "", processed)
# Remove trailing '-' if any
processed = processed.strip("-")
-logger.debug(f"Processed after stripping letters: {processed}")
+# logger.debug(f"Processed after stripping letters: {processed}")
# try:
# en_num = re.search(r"\-\d{1}$", processed).group()
# processed = rreplace(processed, en_num, "")
@@ -1678,23 +1687,23 @@ class WastewaterArtic(BasicSubmission):
except AttributeError:
plate_num = "1"
plate_num = plate_num.strip("-")
-logger.debug(f"Plate num: {plate_num}")
+# logger.debug(f"Plate num: {plate_num}")
repeat_num = re.search(r"R(?P<repeat>\d)?$", "PBS20240426-2R").groups()[0]
if repeat_num is None and "R" in plate_num:
repeat_num = "1"
plate_num = re.sub(r"R", rf"R{repeat_num}", plate_num)
-logger.debug(f"Processed after plate-num: {processed}")
+# logger.debug(f"Processed after plate-num: {processed}")
day = re.search(r"\d{2}$", processed).group()
processed = rreplace(processed, day, "")
-logger.debug(f"Processed after day: {processed}")
+# logger.debug(f"Processed after day: {processed}")
month = re.search(r"\d{2}$", processed).group()
processed = rreplace(processed, month, "")
processed = processed.replace("--", "")
-logger.debug(f"Processed after month: {processed}")
+# logger.debug(f"Processed after month: {processed}")
year = re.search(r'^(?:\d{2})?\d{2}', processed).group()
year = f"20{year}"
final_en_name = f"PBS{year}{month}{day}-{plate_num}"
-logger.debug(f"Final EN name: {final_en_name}")
+# logger.debug(f"Final EN name: {final_en_name}")
return final_en_name

@classmethod
@@ -1722,12 +1731,12 @@ class WastewaterArtic(BasicSubmission):
dict: Updated parser product.
"""
input_dict = super().finalize_parse(input_dict, xl, info_map)
-logger.debug(f"Incoming input_dict: {pformat(input_dict)}")
+# logger.debug(f"Incoming input_dict: {pformat(input_dict)}")
# TODO: Move to validator?
for sample in input_dict['samples']:
-logger.debug(f"Sample: {sample}")
+# logger.debug(f"Sample: {sample}")
if re.search(r"^NTC", sample['submitter_id']):
-sample['submitter_id'] = sample['submitter_id'] + "-WWG-" + input_dict['rsl_plate_num']['value']
+sample['submitter_id'] = f"{sample['submitter_id']}-WWG-{input_dict['rsl_plate_num']['value']}"
input_dict['csv'] = xl["hitpicks_csv_to_export"]
return input_dict

@@ -1748,13 +1757,13 @@ class WastewaterArtic(BasicSubmission):
# worksheet = input_excel["First Strand List"]
# samples = cls.query(rsl_number=info['rsl_plate_num']['value']).submission_sample_associations
# samples = sorted(samples, key=attrgetter('column', 'row'))
-logger.debug(f"Info:\n{pformat(info)}")
+# logger.debug(f"Info:\n{pformat(info)}")
check = 'source_plates' in info.keys() and info['source_plates'] is not None
if check:
worksheet = input_excel['First Strand List']
start_row = 8
for iii, plate in enumerate(info['source_plates']['value']):
-logger.debug(f"Plate: {plate}")
+# logger.debug(f"Plate: {plate}")
row = start_row + iii
try:
worksheet.cell(row=row, column=3, value=plate['plate'])
@@ -1782,12 +1791,16 @@ class WastewaterArtic(BasicSubmission):
# logger.debug(f"vj: {vj}")
column = start_column + 2 + jjj
worksheet.cell(row=start_row, column=column, value=kj['name'])
-worksheet.cell(row=row, column=column, value=kj['value'])
+# logger.debug(f"Writing {kj['name']} with value {kj['value']} to row {row}, column {column}")
+try:
+worksheet.cell(row=row, column=column, value=kj['value'])
+except AttributeError:
+logger.error(f"Failed {kj['name']} with value {kj['value']} to row {row}, column {column}")
check = 'gel_image' in info.keys() and info['gel_image']['value'] is not None
if check:
if info['gel_image'] != None:
worksheet = input_excel['Egel results']
-logger.debug(f"We got an image: {info['gel_image']}")
+# logger.debug(f"We got an image: {info['gel_image']}")
with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
z = zipped.extract(info['gel_image']['value'], Path(TemporaryDirectory().name))
img = OpenpyxlImage(z)
@@ -1817,7 +1830,7 @@ class WastewaterArtic(BasicSubmission):
headers = [item['name'] for item in base_dict['gel_info'][0]['values']]
base_dict['headers'] = [''] * (4 - len(headers))
base_dict['headers'] += headers
-logger.debug(f"Gel info: {pformat(base_dict['headers'])}")
+# logger.debug(f"Gel info: {pformat(base_dict['headers'])}")
check = 'gel_image' in base_dict.keys() and base_dict['gel_image'] != None
if check:
with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
@@ -1834,7 +1847,7 @@ class WastewaterArtic(BasicSubmission):
Returns:
List[dict]: Updated dictionaries
"""
-logger.debug(f"Hello from {self.__class__.__name__} dictionary sample adjuster.")
+# logger.debug(f"Hello from {self.__class__.__name__} dictionary sample adjuster.")
output = []
set_plate = None
for assoc in self.submission_sample_associations:
@@ -1888,7 +1901,7 @@ class WastewaterArtic(BasicSubmission):
self.comment.append(com)
else:
self.comment = [com]
-logger.debug(pformat(self.gel_info))
+# logger.debug(pformat(self.gel_info))
with ZipFile(self.__directory_path__.joinpath("submission_imgs.zip"), 'a') as zipf:
# Add a file located at the source_path to the destination within the zip
# file. It will overwrite existing files if the names collide, but it
@@ -1966,7 +1979,7 @@ class BasicSample(BaseClass):
Returns:
dict: well location and name (sample id, organism) NOTE: keys must sync with WWSample to_sub_dict above
"""
-logger.debug(f"Converting {self} to dict.")
+# logger.debug(f"Converting {self} to dict.")
# start = time()
sample = {}
sample['Submitter ID'] = self.submitter_id
@@ -2042,7 +2055,7 @@ class BasicSample(BaseClass):
Returns:
dict: Updated parser results.
"""
-logger.debug(f"Hello from {cls.__name__} sample parser!")
+# logger.debug(f"Hello from {cls.__name__} sample parser!")
return input_dict

@classmethod
@@ -2059,7 +2072,7 @@ class BasicSample(BaseClass):
base_dict['excluded'] = ['submissions', 'excluded', 'colour', 'tooltip']
env = jinja_template_loading()
temp_name = f"{cls.__name__.lower()}_details.html"
-logger.debug(f"Returning template: {temp_name}")
+# logger.debug(f"Returning template: {temp_name}")
try:
template = env.get_template(temp_name)
except TemplateNotFound as e:
@@ -2091,7 +2104,7 @@ class BasicSample(BaseClass):
model = cls.find_polymorphic_subclass(attrs=kwargs)
else:
model = cls.find_polymorphic_subclass(polymorphic_identity=sample_type)
-logger.debug(f"Length of kwargs: {len(kwargs)}")
+# logger.debug(f"Length of kwargs: {len(kwargs)}")
# model = models.BasicSample.find_subclasses(ctx=ctx, attrs=kwargs)
# query: Query = setup_lookup(ctx=ctx, locals=locals()).query(model)
query: Query = cls.__database_session__.query(model)
@@ -2141,7 +2154,7 @@ class BasicSample(BaseClass):
# f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects.")
sanitized_kwargs = {k:v for k,v in kwargs.items() if k not in disallowed}
instance = cls.query(sample_type=sample_type, limit=1, **kwargs)
-logger.debug(f"Retrieved instance: {instance}")
+# logger.debug(f"Retrieved instance: {instance}")
if instance is None:
used_class = cls.find_polymorphic_subclass(attrs=sanitized_kwargs, polymorphic_identity=sample_type)
instance = used_class(**sanitized_kwargs)
@@ -2219,7 +2232,7 @@ class WastewaterSample(BasicSample):
dict: Updated parser results.
"""
output_dict = super().parse_sample(input_dict)
-logger.debug(f"Initial sample dict: {pformat(output_dict)}")
+# logger.debug(f"Initial sample dict: {pformat(output_dict)}")
disallowed = ["", None, "None"]
try:
check = output_dict['rsl_number'] in [None, "None"]
@@ -2258,7 +2271,7 @@ class WastewaterSample(BasicSample):
plates = [item.rsl_plate_num for item in
self.submissions[:self.submissions.index(current_artic_submission)]]
subs = [sub for sub in self.submissions if sub.rsl_plate_num in plates]
-logger.debug(f"Submissions: {subs}")
+# logger.debug(f"Submissions: {subs}")
try:
return subs[-1]
except IndexError:
@@ -2339,7 +2352,7 @@ class SubmissionSampleAssociation(BaseClass):
self.id = id
else:
self.id = self.__class__.autoincrement_id()
-logger.debug(f"Using id: {self.id}")
+# logger.debug(f"Using submission sample association id: {self.id}")

def __repr__(self) -> str:
try:
@@ -2356,9 +2369,9 @@ class SubmissionSampleAssociation(BaseClass):
dict: Updated dictionary with row, column and well updated
"""
# Get sample info
-logger.debug(f"Running {self.__repr__()}")
+# logger.debug(f"Running {self.__repr__()}")
sample = self.sample.to_sub_dict()
-logger.debug("Sample conversion complete.")
+# logger.debug("Sample conversion complete.")
sample['Name'] = self.sample.submitter_id
sample['Row'] = self.row
sample['Column'] = self.column
@@ -2382,7 +2395,7 @@ class SubmissionSampleAssociation(BaseClass):
"""
# Since there is no PCR, negliable result is necessary.
sample = self.to_sub_dict()
-logger.debug(f"Sample dict to hitpick: {sample}")
+# logger.debug(f"Sample dict to hitpick: {sample}")
env = jinja_template_loading()
template = env.get_template("tooltip.html")
tooltip_text = template.render(fields=sample)
@@ -2430,7 +2443,7 @@ class SubmissionSampleAssociation(BaseClass):
except Exception as e:
logger.error(f"Could not get polymorph {polymorphic_identity} of {cls} due to {e}")
output = cls
-logger.debug(f"Using SubmissionSampleAssociation subclass: {output}")
+# logger.debug(f"Using SubmissionSampleAssociation subclass: {output}")
return output

@classmethod
@@ -2519,7 +2532,7 @@ class SubmissionSampleAssociation(BaseClass):
Returns:
SubmissionSampleAssociation: Queried or new association.
"""
-logger.debug(f"Attempting create or query with {kwargs}")
+# logger.debug(f"Attempting create or query with {kwargs}")
match submission:
case BasicSubmission():
pass

@@ -19,6 +19,7 @@ from datetime import date
from dateutil.parser import parse, ParserError
from tools import check_not_nan, convert_nans_to_nones, row_map, row_keys, is_missing, remove_key_from_list_of_dicts


logger = logging.getLogger(f"submissions.{__name__}")

@@ -44,26 +45,24 @@ class SheetParser(object):
logger.error(f"No filepath given.")
raise ValueError("No filepath given.")
try:
# self.xl = pd.ExcelFile(filepath)
self.xl = load_workbook(filepath, data_only=True)
except ValueError as e:
logger.error(f"Incorrect value: {e}")
raise FileNotFoundError(f"Couldn't parse file {self.filepath}")
self.sub = OrderedDict()
-# make decision about type of sample we have
+# NOTE: make decision about type of sample we have
self.sub['submission_type'] = dict(value=RSLNamer.retrieve_submission_type(filename=self.filepath),
missing=True)
self.submission_type = SubmissionType.query(name=self.sub['submission_type'])
self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
-# grab the info map from the submission type in database
+# NOTE: grab the info map from the submission type in database
self.parse_info()
self.import_kit_validation_check()
self.parse_reagents()
# self.import_reagent_validation_check()
self.parse_samples()
self.parse_equipment()
self.finalize_parse()
-logger.debug(f"Parser.sub after info scrape: {pformat(self.sub)}")
+# logger.debug(f"Parser.sub after info scrape: {pformat(self.sub)}")

def parse_info(self):
"""
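The parser now reads workbooks through openpyxl instead of pandas; data_only=True is the significant flag, since it yields the cached result of formula cells rather than the formula text. A minimal sketch (the path is illustrative):

from openpyxl import load_workbook

wb = load_workbook("submission.xlsx", data_only=True)  # hypothetical file
for sheet in wb.sheetnames:
    ws = wb[sheet]
    # cell() is 1-indexed. With data_only=True, .value holds the
    # last value Excel cached for a formula cell, or None if the
    # workbook was never opened/saved by a calculating application.
    print(sheet, ws.cell(row=1, column=1).value)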
@@ -141,7 +140,7 @@ class SheetParser(object):
pyd_dict = copy(self.sub)
pyd_dict['samples'] = [PydSample(**sample) for sample in self.sub['samples']]
pyd_dict['reagents'] = [PydReagent(**reagent) for reagent in self.sub['reagents']]
-logger.debug(f"Equipment: {self.sub['equipment']}")
+# logger.debug(f"Equipment: {self.sub['equipment']}")
try:
check = len(self.sub['equipment']) == 0
except TypeError:
@@ -157,7 +156,7 @@ class SheetParser(object):
class InfoParser(object):

def __init__(self, xl: Workbook, submission_type: str|SubmissionType, sub_object: BasicSubmission|None=None):
-logger.info(f"\n\Hello from InfoParser!\n\n")
+logger.info(f"\n\nHello from InfoParser!\n\n")
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
if sub_object is None:
@@ -166,7 +165,7 @@ class InfoParser(object):
self.sub_object = sub_object
self.map = self.fetch_submission_info_map()
self.xl = xl
-logger.debug(f"Info map for InfoParser: {pformat(self.map)}")
+# logger.debug(f"Info map for InfoParser: {pformat(self.map)}")

def fetch_submission_info_map(self) -> dict:
"""
@@ -179,13 +178,9 @@ class InfoParser(object):
dict: Location map of all info for this submission type
"""
self.submission_type = dict(value=self.submission_type_obj.name, missing=True)
-logger.debug(f"Looking up submission type: {self.submission_type['value']}")
-# submission_type = SubmissionType.query(name=self.submission_type['value'])
-# info_map = submission_type.info_map
-# self.sub_object: BasicSubmission = \
-# BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type['value'])
+# logger.debug(f"Looking up submission type: {self.submission_type['value']}")
info_map = self.sub_object.construct_info_map("read")
-# Get the parse_info method from the submission type specified
+# NOTE: Get the parse_info method from the submission type specified
return info_map

def parse_info(self) -> dict:
@@ -195,30 +190,19 @@ class InfoParser(object):
Returns:
dict: key:value of basic info
"""
-# if isinstance(self.submission_type, str):
-# self.submission_type = dict(value=self.submission_type, missing=True)
dicto = {}
-# exclude_from_generic = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type['value']).get_default_info("parser_ignore")
-# This loop parses generic info
-logger.debug(f"Map: {self.map}")
-# for sheet in self.xl.sheet_names:
+# NOTE: This loop parses generic info
+# logger.debug(f"Map: {self.map}")
for sheet in self.xl.sheetnames:
-# df = self.xl.parse(sheet, header=None)
ws = self.xl[sheet]
relevant = []
for k, v in self.map.items():
-# If the value is hardcoded put it in the dictionary directly.
+# NOTE: If the value is hardcoded put it in the dictionary directly.
if isinstance(v, str):
dicto[k] = dict(value=v, missing=False)
continue
-logger.debug(f"Looking for {k} in self.map")
-logger.debug(f"Locations: {v}")
-# try:
-# check = sheet in self.map[k]['sheets']
-# except TypeError:
-# continue
-# if check:
-# relevant[k] = v
+# logger.debug(f"Looking for {k} in self.map")
+# logger.debug(f"Locations: {v}")
for location in v:
try:
check = location['sheet'] == sheet
@@ -235,7 +219,6 @@ class InfoParser(object):
continue
for item in relevant:
# NOTE: Get cell contents at this location
-# value = df.iat[item['row']-1, item['column']-1]
value = ws.cell(row=item['row'], column=item['column']).value
logger.debug(f"Value for {item['name']} = {value}")
match item['name']:
@@ -250,10 +233,10 @@ class InfoParser(object):
dicto[item['name']]['value'] += value
continue
except KeyError:
-logger.debug(f"New value for {item['name']}")
+logger.error(f"New value for {item['name']}")
case _:
value, missing = is_missing(value)
-logger.debug(f"Setting {item} on {sheet} to {value}")
+# logger.debug(f"Setting {item} on {sheet} to {value}")
if item['name'] not in dicto.keys():
try:
dicto[item['name']] = dict(value=value, missing=missing)
@@ -265,14 +248,14 @@ class InfoParser(object):
class ReagentParser(object):

def __init__(self, xl: Workbook, submission_type: str, extraction_kit: str, sub_object:BasicSubmission|None=None):
-logger.debug("\n\nHello from ReagentParser!\n\n")
+# logger.debug("\n\nHello from ReagentParser!\n\n")
self.submission_type_obj = submission_type
self.sub_object = sub_object
if isinstance(extraction_kit, dict):
extraction_kit = extraction_kit['value']
self.kit_object = KitType.query(name=extraction_kit)
self.map = self.fetch_kit_info_map(extraction_kit=extraction_kit, submission_type=submission_type)
-logger.debug(f"Reagent Parser map: {self.map}")
+# logger.debug(f"Reagent Parser map: {self.map}")
self.xl = xl

def fetch_kit_info_map(self, extraction_kit: dict, submission_type: str) -> dict:
@@ -305,45 +288,40 @@ class ReagentParser(object):
"""
listo = []
for sheet in self.xl.sheetnames:
-# df = self.xl.parse(sheet, header=None, dtype=object)
ws = self.xl[sheet]
-# df.replace({np.nan: None}, inplace = True)
relevant = {k.strip(): v for k, v in self.map.items() if sheet in self.map[k]['sheet']}
-logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
+# logger.debug(f"relevant map for {sheet}: {pformat(relevant)}")
if relevant == {}:
continue
for item in relevant:
-logger.debug(f"Attempting to scrape: {item}")
+# logger.debug(f"Attempting to scrape: {item}")
try:
reagent = relevant[item]
-# name = df.iat[relevant[item]['name']['row']-1, relevant[item]['name']['column']-1]
-# lot = df.iat[relevant[item]['lot']['row']-1, relevant[item]['lot']['column']-1]
-# expiry = df.iat[relevant[item]['expiry']['row']-1, relevant[item]['expiry']['column']-1]
name = ws.cell(row=reagent['name']['row'], column=reagent['name']['column']).value
lot = ws.cell(row=reagent['lot']['row'], column=reagent['lot']['column']).value
expiry = ws.cell(row=reagent['expiry']['row'], column=reagent['expiry']['column']).value
if 'comment' in relevant[item].keys():
-logger.debug(f"looking for {relevant[item]} comment.")
-# comment = df.iat[relevant[item]['comment']['row']-1, relevant[item]['comment']['column']-1]
-expiry = ws.cell(row=reagent['comment']['row'], column=reagent['comment']['column']).value
+# logger.debug(f"looking for {relevant[item]} comment.")
+comment = ws.cell(row=reagent['comment']['row'], column=reagent['comment']['column']).value
else:
comment = ""
except (KeyError, IndexError):
listo.append(
PydReagent(type=item.strip(), lot=None, expiry=None, name=None, comment="", missing=True))
continue
-# If the cell is blank tell the PydReagent
+# NOTE: If the cell is blank tell the PydReagent
if check_not_nan(lot):
missing = False
else:
missing = True
# logger.debug(f"Got lot for {item}-{name}: {lot} as {type(lot)}")
lot = str(lot)
-logger.debug(
-f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}, comment: {comment}")
+# logger.debug(
+# f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}, comment: {comment}")
try:
check = name.lower() != "not applicable"
except AttributeError:
logger.warning(f"name is not a string.")
check = True
if check:
listo.append(dict(type=item.strip(), lot=lot, expiry=expiry, name=name, comment=comment,
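Beyond the logging cleanup, this hunk fixes a genuine bug: the comment cell's contents were being assigned to expiry, silently clobbering the expiry value read two lines earlier. The scraping pattern itself pulls each field through a row/column location map; a reduced sketch with an invented map:

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws["A1"], ws["B1"], ws["C1"] = "Wash Buffer", "LOT123", "2025-06-01"

# Hypothetical location map shaped like the one the parser consumes:
reagent = {"name": {"row": 1, "column": 1},
           "lot": {"row": 1, "column": 2},
           "expiry": {"row": 1, "column": 3}}

name = ws.cell(row=reagent["name"]["row"], column=reagent["name"]["column"]).value
lot = ws.cell(row=reagent["lot"]["row"], column=reagent["lot"]["column"]).value
expiry = ws.cell(row=reagent["expiry"]["row"], column=reagent["expiry"]["column"]).value
print(name, lot, expiry)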
@@ -364,26 +342,20 @@ class SampleParser(object):
df (pd.DataFrame): input sample dataframe
elution_map (pd.DataFrame | None, optional): optional map of elution plate. Defaults to None.
"""
-logger.debug("\n\nHello from SampleParser!\n\n")
+# logger.debug("\n\nHello from SampleParser!\n\n")
self.samples = []
self.xl = xl
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)
self.submission_type = submission_type.name
self.submission_type_obj = submission_type
if sub_object is None:
sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type_obj.name)
self.sub_object = sub_object
self.sample_info_map = self.fetch_sample_info_map(submission_type=submission_type, sample_map=sample_map)
-logger.debug(f"sample_info_map: {self.sample_info_map}")
-# self.plate_map = self.construct_plate_map(plate_map_location=sample_info_map['plate_map'])
-# logger.debug(f"plate_map: {self.plate_map}")
-# self.lookup_table = self.construct_lookup_table(lookup_table_location=sample_info_map['lookup_table'])
-# if "plates" in sample_info_map:
-# self.plates = sample_info_map['plates']
-# self.excel_to_db_map = sample_info_map['xl_db_translation']
+# logger.debug(f"sample_info_map: {self.sample_info_map}")
self.plate_map_samples = self.parse_plate_map()
self.lookup_samples = self.parse_lookup_table()
-# if isinstance(self.lookup_table, pd.DataFrame):
-# self.parse_lookup_table()

def fetch_sample_info_map(self, submission_type: str, sample_map: dict | None = None) -> dict:
"""
@@ -395,17 +367,12 @@ class SampleParser(object):
Returns:
dict: Info locations.
"""
-logger.debug(f"Looking up submission type: {submission_type}")
-# submission_type = SubmissionType.query(name=submission_type)
-# self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type)
-# self.custom_sub_parser = .parse_samples
+# logger.debug(f"Looking up submission type: {submission_type}")
self.sample_type = self.sub_object.get_default_info("sample_type")
self.samp_object = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)
-logger.debug(f"Got sample class: {self.samp_object.__name__}")
-# self.custom_sample_parser = .parse_sample
+# logger.debug(f"Got sample class: {self.samp_object.__name__}")
# logger.debug(f"info_map: {pformat(se)}")
if sample_map is None:
-# sample_info_map = submission_type.info_map['samples']
sample_info_map = self.sub_object.construct_sample_map()
else:
sample_info_map = sample_map
@@ -459,22 +426,6 @@ class SampleParser(object):
invalids = [0, "0", "EMPTY"]
smap = self.sample_info_map['plate_map']
ws = self.xl[smap['sheet']]
# ws.protection = SheetProtection()
-# new_df = self.plate_map.dropna(axis=1, how='all')
-# columns = new_df.columns.tolist()
-# for _, iii in new_df.iterrows():
-# for c in columns:
-# if check_not_nan(iii[c]):
-# if iii[c] in invalids:
-# logger.debug(f"Invalid sample name: {iii[c]}, skipping.")
-# continue
-# id = iii[c]
-# logger.debug(f"Adding sample {iii[c]}")
-# try:
-# c = self.plate_map.columns.get_loc(c) + 1
-# except Exception as e:
-# logger.error(f"Unable to get column index of {c} due to {e}")
-# self.samples.append(dict(submitter_id=id, row=row_keys[iii._name], column=c))
plate_map_samples = []
for ii, row in enumerate(range(smap['start_row'], smap['end_row'] + 1), start=1):
# logger.debug(f"Parsing row: {row}")
@@ -494,42 +445,12 @@ class SampleParser(object):
|
||||
pass
|
||||
return plate_map_samples
|
||||
|
||||
def parse_lookup_table(self) -> dict:
|
||||
def parse_lookup_table(self) -> List[dict]:
|
||||
"""
|
||||
Parse misc info from lookup table.
|
||||
"""
|
||||
lmap = self.sample_info_map['lookup_table']
|
||||
ws = self.xl[lmap['sheet']]
|
# for sample in self.samples:
# addition = self.lookup_table[self.lookup_table.isin([sample['submitter_id']]).any(axis=1)].squeeze()
# # logger.debug(addition)
# if isinstance(addition, pd.DataFrame) and not addition.empty:
# addition = addition.iloc[0]
# # logger.debug(f"Lookuptable info: {addition.to_dict()}")
# for k,v in addition.to_dict().items():
# # logger.debug(f"Checking {k} in lookup table.")
# if check_not_nan(k) and isinstance(k, str):
# if k.lower() not in sample:
# k = k.replace(" ", "_").replace("#","num").lower()
# # logger.debug(f"Adding {type(v)} - {k}, {v} to the lookuptable output dict")
# match v:
# case pd.Timestamp():
# sample[k] = v.date()
# case str():
# sample[k] = determine_if_date(v)
# case _:
# sample[k] = v
# # Set row in lookup table to blank values to prevent multiple lookups.
# try:
# self.lookup_table.loc[self.lookup_table['Sample #']==addition['Sample #']] = np.nan
# except (ValueError, KeyError):
# pass
# try:
# self.lookup_table.loc[self.lookup_table['Well']==addition['Well']] = np.nan
# except (ValueError, KeyError):
# pass
# # logger.debug(f"Output sample dict: {sample}")
# logger.debug(f"Final lookup_table: \n\n {self.lookup_table}")
lookup_samples = []
for ii, row in enumerate(range(lmap['start_row'], lmap['end_row']+1), start=1):
row_dict = {k:ws.cell(row=row, column=v).value for k, v in lmap['sample_columns'].items()}
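
The rest of this loop is cut off by the hunk. Given the comprehension above, lmap['sample_columns'] evidently maps field names to column indices; a plausible completion, with the empty-row skip and the 'row' bookkeeping key as assumptions rather than confirmed code:

from typing import List

def scan_lookup_table(ws, lmap: dict) -> List[dict]:
    # ws is an openpyxl worksheet; lmap is the 'lookup_table' section of sample_info_map
    lookup_samples = []
    for ii, row in enumerate(range(lmap['start_row'], lmap['end_row'] + 1), start=1):
        row_dict = {k: ws.cell(row=row, column=v).value for k, v in lmap['sample_columns'].items()}
        if all(v is None for v in row_dict.values()):
            continue  # nothing entered on this row of the lookup table
        row_dict['row'] = ii
        lookup_samples.append(row_dict)
    return lookup_samples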
@@ -549,7 +470,7 @@ class SampleParser(object):

def parse_samples(self) -> Tuple[Report | None, List[dict] | List[PydSample]]:
"""
Parse merged platemap\lookup info into dicts/samples
Parse merged platemap/lookup info into dicts/samples

Returns:
List[dict] | List[PydSample]: List of samples
@@ -567,36 +488,14 @@ class SampleParser(object):
v = convert_nans_to_nones(v)
case _:
v = v
# try:
# translated_dict[self.excel_to_db_map[k]] = convert_nans_to_nones(v)
# except KeyError:
translated_dict[k] = convert_nans_to_nones(v)
translated_dict['sample_type'] = f"{self.submission_type} Sample"
# translated_dict = self.custom_sub_parser(translated_dict)
translated_dict = self.sub_object.parse_samples(translated_dict)
# translated_dict = self.custom_sample_parser(translated_dict)
translated_dict = self.samp_object.parse_sample(translated_dict)
# logger.debug(f"Here is the output of the custom parser:\n{translated_dict}")
new_samples.append(PydSample(**translated_dict))
return result, new_samples
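
Every value is funnelled through convert_nans_to_nones before the dict reaches PydSample. That helper is defined elsewhere in the codebase; going by its name and the pandas values flowing in, it plausibly behaves like this sketch (an assumption, not the actual definition):

def convert_nans_to_nones(value):
    # pandas NaN is a float that compares unequal to itself; map it to None
    if isinstance(value, float) and value != value:
        return None
    return value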

# def grab_plates(self) -> List[str]:
# """
# Parse plate names from
#
# Returns:
# List[str]: list of plate names.
# """
# plates = []
# for plate in self.plates:
# df = self.xl.parse(plate['sheet'], header=None)
# if isinstance(df.iat[plate['row'] - 1, plate['column'] - 1], str):
# output = RSLNamer.retrieve_rsl_number(filename=df.iat[plate['row'] - 1, plate['column'] - 1])
# else:
# continue
# plates.append(output)
# return plates

def reconcile_samples(self):
# TODO: Move to pydantic validator?
if self.plate_map_samples is None or self.lookup_samples is None:
@@ -606,29 +505,17 @@ class SampleParser(object):
merge_on_id = self.sample_info_map['lookup_table']['merge_on_id']
plate_map_samples = sorted(copy(self.plate_map_samples), key=lambda d: d['id'])
lookup_samples = sorted(copy(self.lookup_samples), key=lambda d: d[merge_on_id])
# try:
# assert len(plate_map_samples) == len(lookup_samples)
# except AssertionError:
# if len(plate_map_samples) > len(lookup_samples):
# logger.error(
# f"Plate samples ({len(plate_map_samples)}) is longer than Lookup samples: ({len(lookup_samples)})")
# return plate_map_samples
# else:
# logger.error(
# f"Lookup samples ({len(lookup_samples)}) is longer than Plate samples: ({len(plate_map_samples)})")
# return lookup_samples
for ii, psample in enumerate(plate_map_samples):
try:
check = psample['id'] == lookup_samples[ii][merge_on_id]
except (KeyError, IndexError):
check = False
if check:
logger.debug(f"Direct match found for {psample['id']}")
# logger.debug(f"Direct match found for {psample['id']}")
new = lookup_samples[ii] | psample
lookup_samples[ii] = {}
# samples.append(new)
else:
logger.warning(f"Match for {psample['id']} not direct, running search.")
# logger.warning(f"Match for {psample['id']} not direct, running search.")
for jj, lsample in enumerate(lookup_samples):
try:
check = lsample[merge_on_id] == psample['id']
@@ -637,13 +524,9 @@ class SampleParser(object):
if check:
new = lsample | psample
lookup_samples[jj] = {}
# self.samples.append(new)
# samples.append(new)
break
else:
new = psample
# samples.append(psample)
# new['sample_type'] = f"{self.submission_type} Sample"
try:
check = new['submitter_id'] is None
except KeyError:
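
One detail worth flagging for review: `lookup_samples[ii] | psample` is the Python 3.9+ dict union, so on any key present in both dicts the plate-map value on the right wins. A small worked example of that precedence (the field values are hypothetical):

lookup = {"submitter_id": "S-001", "well": "A1", "column": 3}  # hypothetical lookup-table row
plate = {"id": "S-001", "column": 1}                           # hypothetical plate-map hit
merged = lookup | plate
assert merged == {"submitter_id": "S-001", "well": "A1", "column": 1, "id": "S-001"}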

@@ -11,6 +11,7 @@ from backend.validators.pydant import PydSubmission
from io import BytesIO
from collections import OrderedDict


logger = logging.getLogger(f"submissions.{__name__}")


@@ -1,4 +1,5 @@
import logging, re
import sys
from pathlib import Path
from openpyxl import load_workbook
from backend.db.models import BasicSubmission, SubmissionType
@@ -79,6 +80,8 @@ class RSLNamer(object):
except UnboundLocalError:
check = True
if check:
if "pytest" in sys.modules:
return "Bacterial Culture"
# logger.debug("Final option, ask the user for submission type")
from frontend.widgets import ObjectSelector
dlg = ObjectSelector(title="Couldn't parse submission type.",

@@ -2,6 +2,8 @@
Contains pydantic models and accompanying validators
'''
from __future__ import annotations

import sys
from operator import attrgetter
import uuid, re, logging
from pydantic import BaseModel, field_validator, Field, model_validator, PrivateAttr
@@ -431,10 +433,14 @@ class PydSubmission(BaseModel, extra='allow'):
value['value'] = None
if value['value'] is None:
value['missing'] = True
if "pytest" in sys.modules:
value['value'] = "Nosocomial"
return value
from frontend.widgets.pop_ups import ObjectSelector
dlg = ObjectSelector(title="Missing Submitting Lab",
message="We need a submitting lab. Please select from the list.",
obj_type=Organization)

if dlg.exec():
value['value'] = dlg.parse_form()
else:
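
For orientation, the pattern this hunk adds (short-circuit to a fixture value under pytest instead of raising a Qt dialog) fits a pydantic before-mode validator. A hedged sketch of the shape, with the model and field names invented for illustration:

import sys
from pydantic import BaseModel, field_validator

class SubmissionStub(BaseModel):
    submitting_lab: dict

    @field_validator("submitting_lab", mode="before")
    @classmethod
    def enforce_submitting_lab(cls, value: dict) -> dict:
        if value.get("value") is None:
            value["missing"] = True
            if "pytest" in sys.modules:
                value["value"] = "Nosocomial"  # test fixture stand-in for user input
                return value
            # the real code pops an ObjectSelector dialog here and reads its parse_form()
        return value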
@@ -651,9 +657,13 @@ class PydSubmission(BaseModel, extra='allow'):
logger.debug(f"Setting {key} to {value}")
match key:
case "reagents":
if code == 1:
instance.submission_reagent_associations = []
logger.debug(f"Looking through {self.reagents}")
for reagent in self.reagents:
reagent, assoc = reagent.toSQL(submission=instance)
if assoc is not None and assoc not in instance.submission_reagent_associations:
logger.debug(f"Association: {assoc}")
if assoc is not None:# and assoc not in instance.submission_reagent_associations:
instance.submission_reagent_associations.append(assoc)
# instance.reagents.append(reagent)
case "samples":
@@ -666,13 +676,13 @@ class PydSubmission(BaseModel, extra='allow'):
case "equipment":
logger.debug(f"Equipment: {pformat(self.equipment)}")
try:
if equip == None:
if equip is None:
continue
except UnboundLocalError:
continue
for equip in self.equipment:
equip, association = equip.toSQL(submission=instance)
if association != None:
if association is not None:
association.save()
logger.debug(
f"Equipment association SQL object to be added to submission: {association.__dict__}")
@@ -719,7 +729,7 @@ class PydSubmission(BaseModel, extra='allow'):
# We need to make sure there's a proper rsl plate number
logger.debug(f"We've got a total cost of {instance.run_cost}")
try:
logger.debug(f"Constructed instance: {instance.to_string()}")
logger.debug(f"Constructed instance: {instance}")
except AttributeError as e:
logger.debug(f"Something went wrong constructing instance {self.rsl_plate_num}: {e}")
logger.debug(f"Constructed submissions message: {msg}")

@@ -263,45 +263,45 @@ class SubmissionFormWidget(QWidget):
self.app.report.add_result(report)
self.app.result_reporter()

def kit_integrity_completion_function(self, extraction_kit:str|None=None):
"""
Compares kit contents to parsed contents and creates widgets.

Args:
obj (QMainWindow): The original app window

Returns:
Tuple[QMainWindow, dict]: Collection of new main app window and result dict
"""
report = Report()
missing_reagents = []
# logger.debug(inspect.currentframe().f_back.f_code.co_name)
# find the widget that contains kit info
if extraction_kit is None:
kit_widget = self.find_widgets(object_name="extraction_kit")[0].input
logger.debug(f"Kit selector: {kit_widget}")
# get current kit being used
self.ext_kit = kit_widget.currentText()
else:
self.ext_kit = extraction_kit
for reagent in self.reagents:
logger.debug(f"Creating widget for {reagent}")
add_widget = self.ReagentFormWidget(parent=self, reagent=reagent, extraction_kit=self.ext_kit)
# self.form.layout().addWidget(add_widget)
self.layout.addWidget(add_widget)
if reagent.missing:
missing_reagents.append(reagent)
logger.debug(f"Checking integrity of {self.ext_kit}")
# TODO: put check_kit_integrity here instead of what's here?
# see if there are any missing reagents
if len(missing_reagents) > 0:
result = Result(msg=f"""The submission you are importing is missing some reagents expected by the kit.\n\n
It looks like you are missing: {[item.type.upper() for item in missing_reagents]}\n\n
Alternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.
\n\nPlease make sure you check the lots carefully!""".replace(" ", ""), status="Warning")
report.add_result(result)
self.report.add_result(report)
logger.debug(f"Outgoing report: {self.report.results}")
# def kit_integrity_completion_function(self, extraction_kit:str|None=None):
# """
# Compares kit contents to parsed contents and creates widgets.
#
# Args:
# obj (QMainWindow): The original app window
#
# Returns:
# Tuple[QMainWindow, dict]: Collection of new main app window and result dict
# """
# report = Report()
# missing_reagents = []
# # logger.debug(inspect.currentframe().f_back.f_code.co_name)
# # find the widget that contains kit info
# if extraction_kit is None:
# kit_widget = self.find_widgets(object_name="extraction_kit")[0].input
# logger.debug(f"Kit selector: {kit_widget}")
# # get current kit being used
# self.ext_kit = kit_widget.currentText()
# else:
# self.ext_kit = extraction_kit
# for reagent in self.reagents:
# logger.debug(f"Creating widget for {reagent}")
# add_widget = self.ReagentFormWidget(parent=self, reagent=reagent, extraction_kit=self.ext_kit)
# # self.form.layout().addWidget(add_widget)
# self.layout.addWidget(add_widget)
# if reagent.missing:
# missing_reagents.append(reagent)
# logger.debug(f"Checking integrity of {self.ext_kit}")
# # TODO: put check_kit_integrity here instead of what's here?
# # see if there are any missing reagents
# if len(missing_reagents) > 0:
# result = Result(msg=f"""The submission you are importing is missing some reagents expected by the kit.\n\n
# It looks like you are missing: {[item.type.upper() for item in missing_reagents]}\n\n
# Alternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.
# \n\nPlease make sure you check the lots carefully!""".replace(" ", ""), status="Warning")
# report.add_result(result)
# self.report.add_result(report)
# logger.debug(f"Outgoing report: {self.report.results}")

def clear_form(self):
"""
@@ -374,17 +374,12 @@ class SubmissionFormWidget(QWidget):
return
case _:
pass
# assert base_submission.reagents != []
# add reagents to submission object
# NOTE: add reagents to submission object
for reagent in base_submission.reagents:
# logger.debug(f"Updating: {reagent} with {reagent.lot}")
reagent.update_last_used(kit=base_submission.extraction_kit)
# logger.debug(f"Here is the final submission: {pformat(base_submission.__dict__)}")
# logger.debug(f"Parsed reagents: {pformat(base_submission.reagents)}")
# logger.debug(f"Sending submission: {base_submission.rsl_plate_num} to database.")
# logger.debug(f"Samples from pyd: {pformat(self.pyd.samples)}")
# logger.debug(f"Samples SQL: {pformat([item.__dict__ for item in base_submission.samples])}")
# logger.debug(f"")
# logger.debug(f"Final reagents: {pformat(base_submission.reagents)}")
# sys.exit("Programmed stop submission_widget.py, line 381")
base_submission.save()
# update summary sheet
self.app.table_widget.sub_wid.setData()
@@ -414,12 +409,12 @@ class SubmissionFormWidget(QWidget):
|
||||
except AttributeError:
|
||||
logger.error(f"No csv file found in the submission at this point.")
|
||||
|
||||
def parse_form(self) -> PydSubmission:
|
||||
def parse_form(self) -> Report:
|
||||
"""
|
||||
Transforms form info into PydSubmission
|
||||
|
||||
Returns:
|
||||
PydSubmission: Pydantic submission object
|
||||
Report: Report on status of parse.
|
||||
"""
|
||||
report = Report()
|
||||
logger.debug(f"Hello from form parser!")
|
||||
@@ -430,15 +425,16 @@ class SubmissionFormWidget(QWidget):
|
||||
match widget:
|
||||
case self.ReagentFormWidget():
|
||||
reagent, _ = widget.parse_form()
|
||||
if reagent != None:
|
||||
if reagent is not None:
|
||||
reagents.append(reagent)
|
||||
case self.InfoItem():
|
||||
field, value = widget.parse_form()
|
||||
if field != None:
|
||||
if field is not None:
|
||||
info[field] = value
|
||||
logger.debug(f"Info: {pformat(info)}")
|
||||
logger.debug(f"Reagents: {pformat(reagents)}")
|
||||
logger.debug(f"Reagents going into pyd: {pformat(reagents)}")
|
||||
self.pyd.reagents = reagents
|
||||
|
||||
# logger.debug(f"Attrs not in info: {[k for k, v in self.__dict__.items() if k not in info.keys()]}")
|
||||
for item in self.recover:
|
||||
logger.debug(f"Attempting to recover: {item}")
|
||||
|
||||
@@ -238,13 +238,16 @@ class Settings(BaseSettings, extra="allow"):
def __init__(self, *args, **kwargs):

super().__init__(*args, **kwargs)
# self.set_from_db(db_path=kwargs['database_path'])
self.set_from_db(db_path=kwargs['database_path'])

def set_from_db(self, db_path:Path):
session = Session(create_engine(f"sqlite:///{db_path}"))
config_items = session.execute(text("SELECT * FROM _configitem")).all()
session.close()
config_items = {item[1]:json.loads(item[2]) for item in config_items}
if 'pytest' in sys.modules:
config_items = dict(power_users=['lwark', 'styson', 'ruwang'])
else:
session = Session(create_engine(f"sqlite:///{db_path}"))
config_items = session.execute(text("SELECT * FROM _configitem")).all()
session.close()
config_items = {item[1]:json.loads(item[2]) for item in config_items}
for k, v in config_items.items():
if not hasattr(self, k):
self.__setattr__(k, v)
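
The guard added here (skip the real database under pytest, otherwise read _configitem rows into a dict) is easier to see factored out. A sketch of an equivalent standalone loader; the _configitem column order (id, key, JSON value) is taken from the comprehension above, and the function name is illustrative:

import json, sys
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session

def load_config_items(db_path) -> dict:
    # under pytest, return a canned config instead of touching the database
    if 'pytest' in sys.modules:
        return dict(power_users=['lwark', 'styson', 'ruwang'])
    with Session(create_engine(f"sqlite:///{db_path}")) as session:
        rows = session.execute(text("SELECT * FROM _configitem")).all()
    # each row is (id, key, value); value is stored as JSON text
    return {row[1]: json.loads(row[2]) for row in rows}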