Code cleanup, dependency update, various bug fixes
@@ -102,7 +102,7 @@ class BaseClass(Base):
@classmethod
def query(cls, **kwargs) -> Any | List[Any]:
"""
Default query function for models
Default query function for models. Overridden in most models.

Returns:
Any | List[Any]: Result of query execution.
@@ -128,7 +128,7 @@ class BaseClass(Base):
query: Query = cls.__database_session__.query(model)
# logger.debug(f"Grabbing singles using {model.get_default_info}")
singles = model.get_default_info('singles')
logger.debug(f"Querying: {model}, with kwargs: {kwargs}")
logger.info(f"Querying: {model}, with kwargs: {kwargs}")
for k, v in kwargs.items():
# logger.debug(f"Using key: {k} with value: {v}")
try:

@@ -63,16 +63,16 @@ class ControlType(BaseClass):
Returns:
List[str]: list of subtypes available
"""
# Get first instance since all should have same subtypes
# Get mode of instance
# NOTE: Get first instance since all should have same subtypes
# NOTE: Get mode of instance
jsoner = getattr(self.instances[0], mode)
# logger.debug(f"JSON out: {jsoner.keys()}")
try:
# Pick genera (all should have same subtypes)
# NOTE: Pick genera (all should have same subtypes)
genera = list(jsoner.keys())[0]
except IndexError:
return []
# remove items that don't have relevant data
# NOTE: remove items that don't have relevant data
subtypes = [item for item in jsoner[genera] if "_hashes" not in item and "_ratio" not in item]
return subtypes

@@ -135,7 +135,6 @@ class Control(BaseClass):
"""
# logger.debug("loading json string into dict")
try:
# kraken = json.loads(self.kraken)
kraken = self.kraken
except TypeError:
kraken = {}
@@ -178,7 +177,7 @@ class Control(BaseClass):
data = self.__getattribute__(mode)
except TypeError:
data = {}
logger.debug(f"Length of data: {len(data)}")
# logger.debug(f"Length of data: {len(data)}")
# logger.debug("dict keys are genera of bacteria, e.g. 'Streptococcus'")
for genus in data:
_dict = dict(
@@ -236,7 +235,7 @@ class Control(BaseClass):
models.Control|List[models.Control]: Control object of interest.
"""
query: Query = cls.__database_session__.query(cls)
# by control type
# NOTE: by control type
match control_type:
case ControlType():
# logger.debug(f"Looking up control by control type: {control_type}")
@@ -246,7 +245,7 @@ class Control(BaseClass):
query = query.join(ControlType).filter(ControlType.name == control_type)
case _:
pass
# by date range
# NOTE: by date range
if start_date is not None and end_date is None:
logger.warning(f"Start date with no end date, using today.")
end_date = date.today()

@@ -120,8 +120,8 @@ class KitType(BaseClass):
submission_type (str | Submissiontype | None, optional): Submission type to narrow results. Defaults to None.

Returns:
list: List of reagent types
"""
List[ReagentType]: List of reagents linked to this kit.
"""
match submission_type:
case SubmissionType():
# logger.debug(f"Getting reagents by SubmissionType {submission_type}")
@@ -152,17 +152,15 @@ class KitType(BaseClass):
dict: Dictionary containing information locations.
"""
info_map = {}
# Account for submission_type variable type.
# NOTE: Account for submission_type variable type.
match submission_type:
case str():
# logger.debug(f"Constructing xl map with str {submission_type}")
assocs = [item for item in self.kit_reagenttype_associations if
item.submission_type.name == submission_type]
# st_assoc = [item for item in self.used_for if submission_type == item.name][0]
case SubmissionType():
# logger.debug(f"Constructing xl map with SubmissionType {submission_type}")
assocs = [item for item in self.kit_reagenttype_associations if item.submission_type == submission_type]
# st_assoc = submission_type
case _:
raise ValueError(f"Wrong variable type: {type(submission_type)} used!")
# logger.debug("Get all KitTypeReagentTypeAssociation for SubmissionType")
@@ -371,10 +369,10 @@ class Reagent(BaseClass):
dict: representation of the reagent's attributes
"""
if extraction_kit is not None:
# Get the intersection of this reagent's ReagentType and all ReagentTypes in KitType
# NOTE: Get the intersection of this reagent's ReagentType and all ReagentTypes in KitType
try:
reagent_role = list(set(self.type).intersection(extraction_kit.reagent_types))[0]
# Most will be able to fall back to first ReagentType in itself because most will only have 1.
# NOTE: Most will be able to fall back to first ReagentType in itself because most will only have 1.
except:
reagent_role = self.type[0]
else:
@@ -383,7 +381,7 @@ class Reagent(BaseClass):
rtype = reagent_role.name.replace("_", " ")
except AttributeError:
rtype = "Unknown"
# Calculate expiry with EOL from ReagentType
# NOTE: Calculate expiry with EOL from ReagentType
try:
place_holder = self.expiry + reagent_role.eol_ext
except (TypeError, AttributeError) as e:
@@ -467,7 +465,7 @@ class Reagent(BaseClass):
match name:
case str():
# logger.debug(f"Looking up reagent by name str: {name}")
# Not limited due to multiple reagents having same name.
# NOTE: Not limited due to multiple reagents having same name.
query = query.filter(cls.name == name)
case _:
pass
@@ -475,7 +473,7 @@ class Reagent(BaseClass):
case str():
# logger.debug(f"Looking up reagent by lot number str: {lot_number}")
query = query.filter(cls.lot == lot_number)
# In this case limit number returned.
# NOTE: In this case limit number returned.
limit = 1
case _:
pass
@@ -516,10 +514,6 @@ class Discount(BaseClass):
organization (models.Organization | str | int): Organization receiving discount.
kit_type (models.KitType | str | int): Kit discount received on.

Raises:
ValueError: Invalid Organization
ValueError: Invalid kit.

Returns:
models.Discount|List[models.Discount]: Discount(s) of interest.
"""
@@ -535,7 +529,6 @@ class Discount(BaseClass):
# logger.debug(f"Looking up discount with organization id: {organization}")
query = query.join(Organization).filter(Organization.id == organization)
case _:
# raise ValueError(f"Invalid value for organization: {organization}")
pass
match kit_type:
case KitType():
@@ -548,7 +541,6 @@ class Discount(BaseClass):
# logger.debug(f"Looking up discount with kit type id: {kit_type}")
query = query.join(KitType).filter(KitType.id == kit_type)
case _:
# raise ValueError(f"Invalid value for kit type: {kit_type}")
pass
return cls.execute_query(query=query)

@@ -634,11 +626,18 @@ class SubmissionType(BaseClass):
self.save()

def construct_info_map(self, mode: Literal['read', 'write']) -> dict:
"""
Make of map of where all fields are located in excel sheet

Args:
mode (Literal["read", "write"]): Which mode to get locations for

Returns:
dict: Map of locations
"""
info = self.info_map
# logger.debug(f"Info map: {info}")
output = {}
# for k,v in info.items():
# info[k]['write'] += info[k]['read']
match mode:
case "read":
output = {k: v[mode] for k, v in info.items() if v[mode]}
@@ -647,7 +646,13 @@ class SubmissionType(BaseClass):
output = {k: v for k, v in output.items() if all([isinstance(item, dict) for item in v])}
return output

def construct_sample_map(self):
def construct_sample_map(self) -> dict:
"""
Returns sample map

Returns:
dict: sample location map
"""
return self.sample_map

def construct_equipment_map(self) -> dict:
@@ -655,7 +660,7 @@ class SubmissionType(BaseClass):
Constructs map of equipment to excel cells.

Returns:
List[dict]: List of equipment locations in excel sheet
dict: Map equipment locations in excel sheet
"""
output = {}
# logger.debug("Iterating through equipment roles")
@@ -671,7 +676,7 @@ class SubmissionType(BaseClass):
Returns PydEquipmentRole of all equipment associated with this SubmissionType

Returns:
List['PydEquipmentRole']: List of equipment roles
List[PydEquipmentRole]: List of equipment roles
"""
return [item.to_pydantic(submission_type=self, extraction_kit=extraction_kit) for item in self.equipment]

@@ -702,7 +707,13 @@ class SubmissionType(BaseClass):
raise TypeError(f"Type {type(equipment_role)} is not allowed")
return list(set([item for items in relevant for item in items if item != None]))

def get_submission_class(self):
def get_submission_class(self) -> "BasicSubmission":
"""
Gets submission class associated with this submission type.

Returns:
BasicSubmission: Submission class
"""
from .submissions import BasicSubmission
return BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.name)

@@ -1063,7 +1074,7 @@ class Equipment(BaseClass):
processes (bool, optional): Whether to include processes. Defaults to False.

Returns:
dict: _description_
dict: Dictionary representation of this equipment
"""
if not processes:
return {k: v for k, v in self.__dict__.items() if k != 'processes'}
@@ -1152,7 +1163,7 @@ class Equipment(BaseClass):
extraction_kit (str | KitType | None, optional): Relevant KitType. Defaults to None.

Returns:
PydEquipment: _description_
PydEquipment: pydantic equipment object
"""
from backend.validators.pydant import PydEquipment
return PydEquipment(
@@ -1179,7 +1190,6 @@ class Equipment(BaseClass):
class EquipmentRole(BaseClass):
"""
Abstract roles for equipment

"""

id = Column(INTEGER, primary_key=True) #: Role id, primary key
@@ -1331,7 +1341,7 @@ class SubmissionEquipmentAssociation(BaseClass):
equipment = relationship(Equipment, back_populates="equipment_submission_associations") #: associated equipment

def __repr__(self):
return f"<SubmissionEquipmentAssociation({self.submission.rsl_plate_num}&{self.equipment.name})>"
return f"<SubmissionEquipmentAssociation({self.submission.rsl_plate_num} & {self.equipment.name})>"

def __init__(self, submission, equipment, role: str = "None"):
self.submission = submission

@@ -107,6 +107,12 @@ class BasicSubmission(BaseClass):

@classmethod
def jsons(cls) -> List[str]:
"""
Get list of JSON db columns

Returns:
List[str]: List of column names
"""
output = [item.name for item in cls.__table__.columns if isinstance(item.type, JSON)]
if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
output += BasicSubmission.jsons()
@@ -114,6 +120,12 @@ class BasicSubmission(BaseClass):

@classmethod
def timestamps(cls) -> List[str]:
"""
Get list of TIMESTAMP columns

Returns:
List[str]: List of column names
"""
output = [item.name for item in cls.__table__.columns if isinstance(item.type, TIMESTAMP)]
if issubclass(cls, BasicSubmission) and not cls.__name__ == "BasicSubmission":
output += BasicSubmission.timestamps()
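The new jsons() and timestamps() classmethods above read their column lists straight from SQLAlchemy's table metadata instead of hard-coding names, then fold in the base class's columns when called on a subclass. A minimal, self-contained sketch of the same introspection pattern (the Demo model and its column names are illustrative, not from this repo):

from sqlalchemy import Column, INTEGER, JSON, TIMESTAMP
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Demo(Base):
    __tablename__ = "demo"
    id = Column(INTEGER, primary_key=True)
    extraction_info = Column(JSON)      # found by json_columns()
    submitted_date = Column(TIMESTAMP)  # found by timestamp_columns()

def json_columns(model) -> list[str]:
    # Each Column carries its type object; isinstance() sorts columns by kind.
    return [c.name for c in model.__table__.columns if isinstance(c.type, JSON)]

def timestamp_columns(model) -> list[str]:
    return [c.name for c in model.__table__.columns if isinstance(c.type, TIMESTAMP)]

print(json_columns(Demo))       # ['extraction_info']
print(timestamp_columns(Demo))  # ['submitted_date']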
@@ -122,7 +134,7 @@ class BasicSubmission(BaseClass):
# TODO: Beef up this to include info_map from DB
@classmethod
def get_default_info(cls, *args):
# Create defaults for all submission_types
# NOTE: Create defaults for all submission_types
parent_defs = super().get_default_info()
recover = ['filepath', 'samples', 'csv', 'comment', 'equipment']
dicto = dict(
@@ -132,9 +144,7 @@ class BasicSubmission(BaseClass):
# NOTE: Fields not placed in ui form
form_ignore=['reagents', 'ctx', 'id', 'cost', 'extraction_info', 'signed_by', 'comment'] + recover,
# NOTE: Fields not placed in ui form to be moved to pydantic
form_recover=recover,
# parser_ignore=['samples', 'signed_by'] + [item for item in cls.jsons() if item != "comment"],
# excel_ignore=[],
form_recover=recover
)
# logger.debug(dicto['singles'])
# NOTE: Singles tells the query which fields to set limit to 1
@@ -151,7 +161,6 @@ class BasicSubmission(BaseClass):
st = cls.get_submission_type()
if st is None:
logger.error("No default info for BasicSubmission.")
# return output
else:
output['submission_type'] = st.name
for k, v in st.defaults.items():
@@ -169,16 +178,37 @@ class BasicSubmission(BaseClass):
return output

@classmethod
def get_submission_type(cls):
def get_submission_type(cls) -> SubmissionType:
"""
Gets the SubmissionType associated with this class

Returns:
SubmissionType: SubmissionType with name equal to this polymorphic identity
"""
name = cls.__mapper_args__['polymorphic_identity']
return SubmissionType.query(name=name)
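get_submission_type() above maps an ORM subclass to its SubmissionType row via the mapper's polymorphic_identity string; find_polymorphic_subclass() goes the other way. A toy, non-SQLAlchemy sketch of that reverse lookup (class names invented for illustration; the real method in this repo may differ):

class BasicSubmission:
    @classmethod
    def find_polymorphic_subclass(cls, polymorphic_identity: str):
        # Depth-first walk of the subclass tree, matching the mapper identity string.
        for sub in cls.__subclasses__():
            if getattr(sub, "__mapper_args__", {}).get("polymorphic_identity") == polymorphic_identity:
                return sub
            nested = sub.find_polymorphic_subclass(polymorphic_identity)
            if nested is not None:
                return nested
        return None

class Wastewater(BasicSubmission):
    __mapper_args__ = {"polymorphic_identity": "Wastewater"}

print(BasicSubmission.find_polymorphic_subclass("Wastewater").__name__)  # Wastewater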
@classmethod
def construct_info_map(cls, mode:Literal['read', 'write']):
def construct_info_map(cls, mode:Literal["read", "write"]) -> dict:
"""
Method to call submission type's construct info map.

Args:
mode (Literal["read", "write"]): Which map to construct.

Returns:
dict: Map of info locations.
"""
return cls.get_submission_type().construct_info_map(mode=mode)

@classmethod
def construct_sample_map(cls):
def construct_sample_map(cls) -> dict:
"""
Method to call submission type's construct_sample_map

Returns:
dict: sample location map
"""
return cls.get_submission_type().construct_sample_map()

def to_dict(self, full_data: bool = False, backup: bool = False, report: bool = False) -> dict:
@@ -192,7 +222,7 @@ class BasicSubmission(BaseClass):
Returns:
dict: dictionary used in submissions summary and details
"""
# get lab from nested organization object
# NOTE: get lab from nested organization object
# logger.debug(f"Converting {self.rsl_plate_num} to dict...")
try:
sub_lab = self.submitting_lab.name
@@ -202,12 +232,12 @@ class BasicSubmission(BaseClass):
sub_lab = sub_lab.replace("_", " ").title()
except AttributeError:
pass
# get extraction kit name from nested kit object
# NOTE: get extraction kit name from nested kit object
try:
ext_kit = self.extraction_kit.name
except AttributeError:
ext_kit = None
# load scraped extraction info
# NOTE: load scraped extraction info
try:
ext_info = self.extraction_info
except TypeError:
@@ -324,7 +354,7 @@ class BasicSubmission(BaseClass):

def make_plate_map(self, plate_rows: int = 8, plate_columns=12) -> str:
"""
Constructs an html based plate map.
Constructs an html based plate map for submission details.

Args:
sample_list (list): List of submission samples
@@ -386,7 +416,7 @@ class BasicSubmission(BaseClass):
subs = [item.to_dict() for item in cls.query(submission_type=submission_type, limit=limit, chronologic=chronologic)]
# logger.debug(f"Got {len(subs)} submissions.")
df = pd.DataFrame.from_records(subs)
# Exclude sub information
# NOTE: Exclude sub information
for item in ['controls', 'extraction_info', 'pcr_info', 'comment', 'comments', 'samples', 'reagents',
'equipment', 'gel_info', 'gel_image', 'dna_core_submission_number', 'gel_controls']:
try:
@@ -414,9 +444,6 @@ class BasicSubmission(BaseClass):
# logger.debug(f"Looking up organization: {value}")
field_value = Organization.query(name=value)
# logger.debug(f"Got {field_value} for organization {value}")
# case "submitter_plate_num":
# # logger.debug(f"Submitter plate id: {value}")
# field_value = value
case "samples":
for sample in value:
# logger.debug(f"Parsing {sample} to sql.")
@@ -436,17 +463,6 @@ class BasicSubmission(BaseClass):
field_value = value
case "ctx" | "csv" | "filepath" | "equipment":
return
# case "comment":
# if value == "" or value == None or value == 'null':
# field_value = None
# else:
# field_value = dict(name=getuser(), text=value, time=datetime.now())
# # if self.comment is None:
# # self.comment = [field_value]
# # else:
# # self.comment.append(field_value)
# self.update_json(field=key, value=field_value)
# return
case item if item in self.jsons():
logger.debug(f"Setting JSON attribute.")
existing = self.__getattribute__(key)
@@ -1852,13 +1868,6 @@ class WastewaterArtic(BasicSubmission):
set_plate = None
for assoc in self.submission_sample_associations:
dicto = assoc.to_sub_dict()
# old_sub = assoc.sample.get_previous_ww_submission(current_artic_submission=self)
# try:
# dicto['plate_name'] = old_sub.rsl_plate_num
# except AttributeError:
# dicto['plate_name'] = ""
# old_assoc = WastewaterAssociation.query(submission=old_sub, sample=assoc.sample, limit=1)
# dicto['well'] = f"{row_map[old_assoc.row]}{old_assoc.column}"
for item in self.source_plates:
old_plate = WastewaterAssociation.query(submission=item['plate'], sample=assoc.sample, limit=1)
if old_plate is not None:
@@ -1879,6 +1888,12 @@ class WastewaterArtic(BasicSubmission):
events['Gel Box'] = self.gel_box
return events

def set_attribute(self, key: str, value):
super().set_attribute(key=key, value=value)
if key == 'gel_info':
if len(self.gel_info) > 3:
self.gel_info = self.gel_info[-3:]

def gel_box(self, obj):
"""
Creates widget to perform gel viewing operations

@@ -543,7 +543,6 @@ class EquipmentParser(object):
def __init__(self, xl: Workbook, submission_type: str|SubmissionType) -> None:
if isinstance(submission_type, str):
submission_type = SubmissionType.query(name=submission_type)

self.submission_type = submission_type
self.xl = xl
self.map = self.fetch_equipment_map()
@@ -555,7 +554,6 @@ class EquipmentParser(object):
Returns:
List[dict]: List of locations
"""
# submission_type = SubmissionType.query(name=self.submission_type)
return self.submission_type.construct_equipment_map()

def get_asset_number(self, input: str) -> str:
@@ -569,7 +567,7 @@ class EquipmentParser(object):
str: asset number
"""
regex = Equipment.get_regex()
logger.debug(f"Using equipment regex: {regex} on {input}")
# logger.debug(f"Using equipment regex: {regex} on {input}")
try:
return regex.search(input).group().strip("-")
except AttributeError:
@@ -582,11 +580,10 @@ class EquipmentParser(object):
Returns:
List[PydEquipment]: list of equipment
"""
logger.debug(f"Equipment parser going into parsing: {pformat(self.__dict__)}")
# logger.debug(f"Equipment parser going into parsing: {pformat(self.__dict__)}")
output = []
# logger.debug(f"Sheets: {sheets}")
for sheet in self.xl.sheetnames:
# df = self.xl.parse(sheet, header=None, dtype=object)
ws = self.xl[sheet]
try:
relevant = [item for item in self.map if item['sheet'] == sheet]
@@ -595,7 +592,6 @@ class EquipmentParser(object):
# logger.debug(f"Relevant equipment: {pformat(relevant)}")
previous_asset = ""
for equipment in relevant:
# asset = df.iat[equipment['name']['row']-1, equipment['name']['column']-1]
asset = ws.cell(equipment['name']['row'], equipment['name']['column'])
if not check_not_nan(asset):
asset = previous_asset
@@ -603,7 +599,6 @@ class EquipmentParser(object):
previous_asset = asset
asset = self.get_asset_number(input=asset)
eq = Equipment.query(asset_number=asset)
# process = df.iat[equipment['process']['row']-1, equipment['process']['column']-1]
process = ws.cell(row=equipment['process']['row'], column=equipment['process']['column'])
try:
output.append(
@@ -614,72 +609,6 @@ class EquipmentParser(object):
# logger.debug(f"Here is the output so far: {pformat(output)}")
return output
# class PCRParser(object):
# """
# Object to pull data from Design and Analysis PCR export file.
# """
#
# def __init__(self, filepath: Path | None = None) -> None:
# """
# Initializes object.
#
# Args:
# filepath (Path | None, optional): file to parse. Defaults to None.
# """
# logger.debug(f"Parsing {filepath.__str__()}")
# if filepath == None:
# logger.error(f"No filepath given.")
# self.xl = None
# else:
# try:
# self.xl = pd.ExcelFile(filepath.__str__())
# except ValueError as e:
# logger.error(f"Incorrect value: {e}")
# self.xl = None
# except PermissionError:
# logger.error(f"Couldn't get permissions for {filepath.__str__()}. Operation might have been cancelled.")
# return
# self.parse_general(sheet_name="Results")
# namer = RSLNamer(filename=filepath.__str__())
# self.plate_num = namer.parsed_name
# self.submission_type = namer.submission_type
# logger.debug(f"Set plate number to {self.plate_num} and type to {self.submission_type}")
# parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
# self.samples = parser.parse_pcr(xl=self.xl, rsl_number=self.plate_num)
#
# def parse_general(self, sheet_name: str):
# """
# Parse general info rows for all types of PCR results
#
# Args:
# sheet_name (str): Name of sheet in excel workbook that holds info.
# """
# self.pcr = {}
# df = self.xl.parse(sheet_name=sheet_name, dtype=object).fillna("")
# self.pcr['comment'] = df.iloc[0][1]
# self.pcr['operator'] = df.iloc[1][1]
# self.pcr['barcode'] = df.iloc[2][1]
# self.pcr['instrument'] = df.iloc[3][1]
# self.pcr['block_type'] = df.iloc[4][1]
# self.pcr['instrument_name'] = df.iloc[5][1]
# self.pcr['instrument_serial'] = df.iloc[6][1]
# self.pcr['heated_cover_serial'] = df.iloc[7][1]
# self.pcr['block_serial'] = df.iloc[8][1]
# self.pcr['run-start'] = df.iloc[9][1]
# self.pcr['run_end'] = df.iloc[10][1]
# self.pcr['run_duration'] = df.iloc[11][1]
# self.pcr['sample_volume'] = df.iloc[12][1]
# self.pcr['cover_temp'] = df.iloc[13][1]
# self.pcr['passive_ref'] = df.iloc[14][1]
# self.pcr['pcr_step'] = df.iloc[15][1]
# self.pcr['quant_cycle_method'] = df.iloc[16][1]
# self.pcr['analysis_time'] = df.iloc[17][1]
# self.pcr['software'] = df.iloc[18][1]
# self.pcr['plugin'] = df.iloc[19][1]
# self.pcr['exported_on'] = df.iloc[20][1]
# self.pcr['imported_by'] = getuser()

class PCRParser(object):
"""Object to pull data from Design and Analysis PCR export file."""

@@ -690,7 +619,7 @@ class PCRParser(object):
Args:
filepath (Path | None, optional): file to parse. Defaults to None.
"""
logger.debug(f'Parsing {filepath.__str__()}')
# logger.debug(f'Parsing {filepath.__str__()}')
if filepath is None:
logger.error('No filepath given.')
self.xl = None

@@ -27,7 +27,7 @@ def make_report_xlsx(records:list[dict]) -> Tuple[DataFrame, DataFrame]:
# aggregate cost and sample count columns
df2 = df.groupby(["Submitting Lab", "Extraction Kit"]).agg({'Extraction Kit':'count', 'Cost': 'sum', 'Sample Count':'sum'})
df2 = df2.rename(columns={"Extraction Kit": 'Run Count'})
logger.debug(f"Output daftaframe for xlsx: {df2.columns}")
# logger.debug(f"Output daftaframe for xlsx: {df2.columns}")
df = df.drop('id', axis=1)
df = df.sort_values(['Submitting Lab', "Submitted Date"])
return df, df2
@@ -47,13 +47,13 @@ def make_report_html(df:DataFrame, start_date:date, end_date:date) -> str:
"""
old_lab = ""
output = []
logger.debug(f"Report DataFrame: {df}")
# logger.debug(f"Report DataFrame: {df}")
for ii, row in enumerate(df.iterrows()):
logger.debug(f"Row {ii}: {row}")
# logger.debug(f"Row {ii}: {row}")
lab = row[0][0]
logger.debug(type(row))
logger.debug(f"Old lab: {old_lab}, Current lab: {lab}")
logger.debug(f"Name: {row[0][1]}")
# logger.debug(type(row))
# logger.debug(f"Old lab: {old_lab}, Current lab: {lab}")
# logger.debug(f"Name: {row[0][1]}")
data = [item for item in row[1]]
kit = dict(name=row[0][1], cost=data[1], run_count=int(data[0]), sample_count=int(data[2]))
# if this is the same lab as before add together
@@ -67,7 +67,7 @@ def make_report_html(df:DataFrame, start_date:date, end_date:date) -> str:
adder = dict(lab=lab, kits=[kit], total_cost=kit['cost'], total_samples=kit['sample_count'], total_runs=kit['run_count'])
output.append(adder)
old_lab = lab
logger.debug(output)
# logger.debug(output)
dicto = {'start_date':start_date, 'end_date':end_date, 'labs':output}#, "table":table}
temp = env.get_template('summary_report.html')
html = temp.render(input=dicto)
@@ -91,14 +91,14 @@ def convert_data_list_to_df(input:list[dict], subtype:str|None=None) -> DataFram
for column in df.columns:
if "percent" in column:
count_col = [item for item in df.columns if "count" in item][0]
# The actual percentage from kraken was off due to exclusion of NaN, recalculating.
# NOTE: The actual percentage from kraken was off due to exclusion of NaN, recalculating.
df[column] = 100 * df[count_col] / df.groupby('name')[count_col].transform('sum')
if column not in safe:
if subtype != None and column != subtype:
del df[column]
# move date of sample submitted on same date as previous ahead one.
# NOTE: move date of sample submitted on same date as previous ahead one.
df = displace_date(df)
# ad hoc method to make data labels more accurate.
# NOTE: ad hoc method to make data labels more accurate.
df = df_column_renamer(df=df)
return df
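The percentage recalculation above relies on groupby().transform('sum'), which broadcasts each group's total back onto its own rows, so every count is divided by its own group's denominator. A small runnable sketch of that pandas pattern (column names invented for illustration):

import pandas as pd

df = pd.DataFrame({
    "name": ["EN1", "EN1", "EN2", "EN2"],
    "contains_count": [30, 70, 10, 40],
})
# transform('sum') returns a Series aligned to the original index,
# unlike agg(), which would collapse each group to one row.
df["contains_percent"] = 100 * df["contains_count"] / df.groupby("name")["contains_count"].transform("sum")
print(df)
# EN1 rows -> 30.0 and 70.0; EN2 rows -> 20.0 and 80.0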
@@ -131,8 +131,8 @@ def displace_date(df:DataFrame) -> DataFrame:
Returns:
DataFrame: output dataframe with dates incremented.
"""
logger.debug(f"Unique items: {df['name'].unique()}")
# get submitted dates for each control
# logger.debug(f"Unique items: {df['name'].unique()}")
# NOTE: get submitted dates for each control
dict_list = [dict(name=item, date=df[df.name == item].iloc[0]['submitted_date']) for item in sorted(df['name'].unique())]
previous_dates = []
for _, item in enumerate(dict_list):
@@ -157,10 +157,10 @@ def check_date(df:DataFrame, item:dict, previous_dates:list) -> Tuple[DataFrame,
check = False
previous_dates.append(item['date'])
if check:
logger.debug(f"We found one! Increment date!\n\t{item['date']} to {item['date'] + timedelta(days=1)}")
# get df locations where name == item name
# logger.debug(f"We found one! Increment date!\n\t{item['date']} to {item['date'] + timedelta(days=1)}")
# NOTE: get df locations where name == item name
mask = df['name'] == item['name']
# increment date in dataframe
# NOTE: increment date in dataframe
df.loc[mask, 'submitted_date'] = df.loc[mask, 'submitted_date'].apply(lambda x: x + timedelta(days=1))
item['date'] += timedelta(days=1)
passed = False
@@ -170,9 +170,9 @@ def check_date(df:DataFrame, item:dict, previous_dates:list) -> Tuple[DataFrame,
# logger.debug(f"DF: {type(df)}, previous_dates: {type(previous_dates)}")
# if run didn't lead to changed date, return values
if passed:
logger.debug(f"Date check passed, returning.")
# logger.debug(f"Date check passed, returning.")
return df, previous_dates
# if date was changed, rerun with new date
# NOTE: if date was changed, rerun with new date
else:
logger.warning(f"Date check failed, running recursion")
df, previous_dates = check_date(df, item, previous_dates)
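displace_date() and check_date() above nudge controls that share a submitted date forward one day, recursing until every control lands on a free date. The same control flow, reduced to plain dates without the DataFrame (an illustrative sketch, not the repo's code):

from datetime import date, timedelta

def displace(dates: list[date]) -> list[date]:
    seen: set[date] = set()
    out = []
    for d in dates:
        while d in seen:        # plays the role of the recursive re-check
            d += timedelta(days=1)
        seen.add(d)
        out.append(d)
    return out

print(displace([date(2024, 1, 1), date(2024, 1, 1), date(2024, 1, 2)]))
# [2024-01-01, 2024-01-02, 2024-01-03]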
@@ -31,7 +31,6 @@ class SheetWriter(object):
case 'filepath':
self.__setattr__(k, v)
case 'submission_type':
# self.__setattr__('submission_type', submission.submission_type['value'])
self.sub[k] = v['value']
self.submission_type = SubmissionType.query(name=v['value'])
self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
@@ -40,7 +39,7 @@ class SheetWriter(object):
self.sub[k] = v['value']
else:
self.sub[k] = v
logger.debug(f"\n\nWriting to {submission.filepath.__str__()}\n\n")
# logger.debug(f"\n\nWriting to {submission.filepath.__str__()}\n\n")

if self.filepath.stem.startswith("tmp"):
template = self.submission_type.template_file
@@ -95,7 +94,7 @@ class InfoWriter(object):
self.xl = xl
map = submission_type.construct_info_map(mode='write')
self.info = self.reconcile_map(info_dict, map)
logger.debug(pformat(self.info))
# logger.debug(pformat(self.info))

def reconcile_map(self, info_dict: dict, map: dict) -> dict:
output = {}
@@ -121,8 +120,7 @@ class InfoWriter(object):
logger.error(f"No locations for {k}, skipping")
continue
for loc in locations:

logger.debug(f"Writing {k} to {loc['sheet']}, row: {loc['row']}, column: {loc['column']}")
# logger.debug(f"Writing {k} to {loc['sheet']}, row: {loc['row']}, column: {loc['column']}")
sheet = self.xl[loc['sheet']]
sheet.cell(row=loc['row'], column=loc['column'], value=v['value'])
return self.sub_object.custom_info_writer(self.xl, info=self.info)
@@ -152,7 +150,7 @@ class ReagentWriter(object):
try:
dicto = dict(value=v, row=mp_info[k]['row'], column=mp_info[k]['column'])
except KeyError as e:
# logger.error(f"Keyerror: {e}")
logger.error(f"KeyError: {e}")
dicto = v
placeholder[k] = dicto
placeholder['sheet'] = mp_info['sheet']
@@ -197,7 +195,6 @@ class SampleWriter(object):
def write_samples(self):
sheet = self.xl[self.map['sheet']]
columns = self.map['sample_columns']
# rows = range(self.map['start_row'], self.map['end_row']+1)
for ii, sample in enumerate(self.samples):
row = self.map['start_row'] + (sample['submission_rank'] - 1)
for k, v in sample.items():
@@ -229,8 +226,6 @@ class EquipmentWriter(object):
for jj, (k, v) in enumerate(equipment.items(), start=1):
dicto = dict(value=v, row=ii, column=jj)
placeholder[k] = dicto

# output.append(placeholder)
else:
for jj, (k, v) in enumerate(equipment.items(), start=1):
try:
@@ -258,8 +253,8 @@ class EquipmentWriter(object):
for k, v in equipment.items():
if not isinstance(v, dict):
continue
logger.debug(
f"Writing {k}: {v['value']} to {equipment['sheet']}, row: {v['row']}, column: {v['column']}")
# logger.debug(
# f"Writing {k}: {v['value']} to {equipment['sheet']}, row: {v['row']}, column: {v['column']}")
if isinstance(v['value'], list):
v['value'] = v['value'][0]
try:

@@ -23,7 +23,7 @@ class RSLNamer(object):
if self.submission_type is None:
# logger.debug("Creating submission type because none exists")
self.submission_type = self.retrieve_submission_type(filename=filename)
logger.debug(f"got submission type: {self.submission_type}")
# logger.debug(f"got submission type: {self.submission_type}")
if self.submission_type is not None:
# logger.debug("Retrieving BasicSubmission subclass")
self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
@@ -47,7 +47,7 @@ class RSLNamer(object):
"""
match filename:
case Path():
logger.debug(f"Using path method for {filename}.")
# logger.debug(f"Using path method for {filename}.")
if filename.exists():
wb = load_workbook(filename)
try:
@@ -67,7 +67,7 @@ class RSLNamer(object):
submission_type = cls.retrieve_submission_type(filename=filename.stem.__str__())
case str():
regex = BasicSubmission.construct_regex()
logger.debug(f"Using string method for {filename}.")
# logger.debug(f"Using string method for {filename}.")
m = regex.search(filename)
try:
submission_type = m.lastgroup
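The string branch above depends on construct_regex() producing one alternation with a named group per submission type, so m.lastgroup reports which type matched. A compact illustration of that regex technique (the patterns here are invented; the real ones come from the submission classes):

import re

# One alternation, one named group per submission type.
regex = re.compile(
    r"""
    (?P<Wastewater>RSL-WW-\d{8})   |
    (?P<Bacterial_Culture>RSL-BC-\d{8})
    """,
    re.IGNORECASE | re.VERBOSE,
)

m = regex.search("RSL-WW-20240101_repeat")
print(m.lastgroup)  # Wastewater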
@@ -100,17 +100,17 @@ class RSLNamer(object):
regex (str): string to construct pattern
filename (str): string to be parsed
"""
logger.debug(f"Input string to be parsed: {filename}")
# logger.debug(f"Input string to be parsed: {filename}")
if regex is None:
regex = BasicSubmission.construct_regex()
else:
regex = re.compile(rf'{regex}', re.IGNORECASE | re.VERBOSE)
logger.debug(f"Using regex: {regex}")
# logger.debug(f"Using regex: {regex}")
match filename:
case Path():
m = regex.search(filename.stem)
case str():
logger.debug(f"Using string method.")
# logger.debug(f"Using string method.")
m = regex.search(filename)
case _:
m = None
@@ -121,7 +121,7 @@ class RSLNamer(object):
parsed_name = None
else:
parsed_name = None
logger.debug(f"Got parsed submission name: {parsed_name}")
# logger.debug(f"Got parsed submission name: {parsed_name}")
return parsed_name

@classmethod
@@ -167,8 +167,8 @@ class RSLNamer(object):
Returns:
str: output file name.
"""
logger.debug(f"Kwargs: {kwargs}")
logger.debug(f"Template: {template}")
# logger.debug(f"Kwargs: {kwargs}")
# logger.debug(f"Template: {template}")
environment = jinja_template_loading()
template = environment.from_string(template)
return template.render(**kwargs)

@@ -134,15 +134,15 @@ class PydReagent(BaseModel):
# logger.debug("Adding extra fields.")
if self.model_extra != None:
self.__dict__.update(self.model_extra)
logger.debug(f"Reagent SQL constructor is looking up type: {self.type}, lot: {self.lot}")
# logger.debug(f"Reagent SQL constructor is looking up type: {self.type}, lot: {self.lot}")
reagent = Reagent.query(lot_number=self.lot, name=self.name)
logger.debug(f"Result: {reagent}")
# logger.debug(f"Result: {reagent}")
if reagent is None:
reagent = Reagent()
for key, value in self.__dict__.items():
if isinstance(value, dict):
value = value['value']
logger.debug(f"Reagent info item for {key}: {value}")
# logger.debug(f"Reagent info item for {key}: {value}")
# set fields based on keys in dictionary
match key:
case "lot":
@@ -191,7 +191,7 @@ class PydSample(BaseModel, extra='allow'):
@model_validator(mode='after')
@classmethod
def validate_model(cls, data):
logger.debug(f"Data for pydsample: {data}")
# logger.debug(f"Data for pydsample: {data}")
model = BasicSample.find_polymorphic_subclass(polymorphic_identity=data.sample_type)
for k, v in data.model_extra.items():
print(k, v)
@@ -200,7 +200,7 @@ class PydSample(BaseModel, extra='allow'):
v = datetime.strptime(v, "%Y-%m-%d")
data.__setattr__(k, v)
# print(dir(data))
logger.debug(f"Data coming out of validation: {pformat(data)}")
# logger.debug(f"Data coming out of validation: {pformat(data)}")
return data

@field_validator("row", "column", "assoc_id", "submission_rank")
@@ -233,7 +233,7 @@ class PydSample(BaseModel, extra='allow'):
"""
report = None
self.__dict__.update(self.model_extra)
logger.debug(f"Here is the incoming sample dict: \n{self.__dict__}")
# logger.debug(f"Here is the incoming sample dict: \n{self.__dict__}")
instance = BasicSample.query_or_create(sample_type=self.sample_type, submitter_id=self.submitter_id)
for key, value in self.__dict__.items():
match key:
@@ -246,8 +246,8 @@ class PydSample(BaseModel, extra='allow'):
if submission is not None:
assoc_type = self.sample_type.replace("Sample", "").strip()
for row, column, aid, submission_rank in zip(self.row, self.column, self.assoc_id, self.submission_rank):
logger.debug(f"Looking up association with identity: ({submission.submission_type_name} Association)")
logger.debug(f"Looking up association with identity: ({assoc_type} Association)")
# logger.debug(f"Looking up association with identity: ({submission.submission_type_name} Association)")
# logger.debug(f"Looking up association with identity: ({assoc_type} Association)")
association = SubmissionSampleAssociation.query_or_create(association_type=f"{assoc_type} Association",
submission=submission,
sample=instance,
@@ -357,7 +357,7 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator('equipment', mode='before')
@classmethod
def convert_equipment_dict(cls, value):
logger.debug(f"Equipment: {value}")
# logger.debug(f"Equipment: {value}")
if isinstance(value, dict):
return value['value']
return value
@@ -381,7 +381,7 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("submitted_date", mode="before")
@classmethod
def rescue_date(cls, value):
logger.debug(f"\n\nDate coming into pydantic: {value}\n\n")
# logger.debug(f"\n\nDate coming into pydantic: {value}\n\n")
try:
check = value['value'] == None
except TypeError:
@@ -426,7 +426,7 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def lookup_submitting_lab(cls, value):
if isinstance(value['value'], str):
logger.debug(f"Looking up organization {value['value']}")
# logger.debug(f"Looking up organization {value['value']}")
try:
value['value'] = Organization.query(name=value['value']).name
except AttributeError:
@@ -457,12 +457,12 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("rsl_plate_num")
@classmethod
def rsl_from_file(cls, value, values):
logger.debug(f"RSL-plate initial value: {value['value']} and other values: {values.data}")
# logger.debug(f"RSL-plate initial value: {value['value']} and other values: {values.data}")
sub_type = values.data['submission_type']['value']
if check_not_nan(value['value']):
return value
else:
logger.debug("Constructing plate name.")
# logger.debug("Constructing plate name.")
output = RSLNamer(filename=values.data['filepath'].__str__(), sub_type=sub_type,
data=values.data).parsed_name
return dict(value=output, missing=True)
@@ -649,32 +649,32 @@ class PydSubmission(BaseModel, extra='allow'):
rsl_plate_num=self.rsl_plate_num['value'])
result = Result(msg=msg, code=code)
self.handle_duplicate_samples()
logger.debug(f"Here's our list of duplicate removed samples: {self.samples}")
# logger.debug(f"Here's our list of duplicate removed samples: {self.samples}")
# for key, value in self.__dict__.items():
for key, value in dicto.items():
if isinstance(value, dict):
value = value['value']
logger.debug(f"Setting {key} to {value}")
# logger.debug(f"Setting {key} to {value}")
match key:
case "reagents":
if code == 1:
instance.submission_reagent_associations = []
logger.debug(f"Looking through {self.reagents}")
# logger.debug(f"Looking through {self.reagents}")
for reagent in self.reagents:
reagent, assoc = reagent.toSQL(submission=instance)
logger.debug(f"Association: {assoc}")
# logger.debug(f"Association: {assoc}")
if assoc is not None:# and assoc not in instance.submission_reagent_associations:
instance.submission_reagent_associations.append(assoc)
# instance.reagents.append(reagent)
case "samples":
for sample in self.samples:
sample, associations, _ = sample.toSQL(submission=instance)
logger.debug(f"Sample SQL object to be added to submission: {sample.__dict__}")
# logger.debug(f"Sample SQL object to be added to submission: {sample.__dict__}")
for assoc in associations:
if assoc is not None and assoc not in instance.submission_sample_associations:
instance.submission_sample_associations.append(assoc)
case "equipment":
logger.debug(f"Equipment: {pformat(self.equipment)}")
# logger.debug(f"Equipment: {pformat(self.equipment)}")
try:
if equip is None:
continue
@@ -684,11 +684,11 @@ class PydSubmission(BaseModel, extra='allow'):
equip, association = equip.toSQL(submission=instance)
if association is not None:
association.save()
logger.debug(
f"Equipment association SQL object to be added to submission: {association.__dict__}")
# logger.debug(
# f"Equipment association SQL object to be added to submission: {association.__dict__}")
instance.submission_equipment_associations.append(association)
case item if item in instance.jsons():
logger.debug(f"{item} is a json.")
# logger.debug(f"{item} is a json.")
try:
ii = value.items()
except AttributeError:
@@ -701,38 +701,38 @@ class PydSubmission(BaseModel, extra='allow'):
try:
instance.set_attribute(key=key, value=value)
except AttributeError as e:
logger.debug(f"Could not set attribute: {key} to {value} due to: \n\n {e}")
logger.error(f"Could not set attribute: {key} to {value} due to: \n\n {e}")
continue
except KeyError:
continue
try:
logger.debug(f"Calculating costs for procedure...")
# logger.debug(f"Calculating costs for procedure...")
instance.calculate_base_cost()
except (TypeError, AttributeError) as e:
logger.debug(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using full plate cost.")
# logger.debug(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using full plate cost.")
try:
instance.run_cost = instance.extraction_kit.cost_per_run
except AttributeError:
instance.run_cost = 0
logger.debug(f"Calculated base run cost of: {instance.run_cost}")
# logger.debug(f"Calculated base run cost of: {instance.run_cost}")
# Apply any discounts that are applicable for client and kit.
try:
logger.debug("Checking and applying discounts...")
# logger.debug("Checking and applying discounts...")
discounts = [item.amount for item in
Discount.query(kit_type=instance.extraction_kit, organization=instance.submitting_lab)]
logger.debug(f"We got discounts: {discounts}")
# logger.debug(f"We got discounts: {discounts}")
if len(discounts) > 0:
discounts = sum(discounts)
instance.run_cost = instance.run_cost - discounts
except Exception as e:
logger.error(f"An unknown exception occurred when calculating discounts: {e}")
# We need to make sure there's a proper rsl plate number
logger.debug(f"We've got a total cost of {instance.run_cost}")
try:
logger.debug(f"Constructed instance: {instance}")
except AttributeError as e:
logger.debug(f"Something went wrong constructing instance {self.rsl_plate_num}: {e}")
logger.debug(f"Constructed submissions message: {msg}")
# logger.debug(f"We've got a total cost of {instance.run_cost}")
# try:
# logger.debug(f"Constructed instance: {instance}")
# except AttributeError as e:
# logger.debug(f"Something went wrong constructing instance {self.rsl_plate_num}: {e}")
# logger.debug(f"Constructed submissions message: {msg}")
return instance, result

def to_form(self, parent: QWidget):
@@ -777,26 +777,26 @@ class PydSubmission(BaseModel, extra='allow'):
Report: Result object containing a message and any missing components.
"""
report = Report()
logger.debug(f"Extraction kit: {extraction_kit}. Is it a string? {isinstance(extraction_kit, str)}")
# logger.debug(f"Extraction kit: {extraction_kit}. Is it a string? {isinstance(extraction_kit, str)}")
if isinstance(extraction_kit, str):
extraction_kit = dict(value=extraction_kit)
if extraction_kit is not None and extraction_kit != self.extraction_kit['value']:
self.extraction_kit['value'] = extraction_kit['value']
logger.debug(f"Looking up {self.extraction_kit['value']}")
# logger.debug(f"Looking up {self.extraction_kit['value']}")
ext_kit = KitType.query(name=self.extraction_kit['value'])
ext_kit_rtypes = [item.to_pydantic() for item in
ext_kit.get_reagents(required=True, submission_type=self.submission_type['value'])]
logger.debug(f"Kit reagents: {ext_kit_rtypes}")
logger.debug(f"Submission reagents: {self.reagents}")
# logger.debug(f"Kit reagents: {ext_kit_rtypes}")
# logger.debug(f"Submission reagents: {self.reagents}")
# Exclude any reagenttype found in this pyd not expected in kit.
expected_check = [item.type for item in ext_kit_rtypes]
output_reagents = [rt for rt in self.reagents if rt.type in expected_check]
logger.debug(f"Already have these reagent types: {output_reagents}")
# logger.debug(f"Already have these reagent types: {output_reagents}")
missing_check = [item.type for item in output_reagents]
missing_reagents = [rt for rt in ext_kit_rtypes if rt.type not in missing_check]
missing_reagents += [rt for rt in output_reagents if rt.missing]
output_reagents += [rt for rt in missing_reagents if rt not in output_reagents]
logger.debug(f"Missing reagents types: {missing_reagents}")
# logger.debug(f"Missing reagents types: {missing_reagents}")
# if lists are equal return no problem
if len(missing_reagents) == 0:
result = None
@@ -873,7 +873,7 @@ class PydReagentType(BaseModel):
instance: ReagentType = ReagentType.query(name=self.name)
if instance == None:
instance = ReagentType(name=self.name, eol_ext=self.eol_ext)
logger.debug(f"This is the reagent type instance: {instance.__dict__}")
# logger.debug(f"This is the reagent type instance: {instance.__dict__}")
try:
assoc = KitTypeReagentTypeAssociation.query(reagent_type=instance, kit_type=kit)
except StatementError: