Pre-frontend refactor.

This commit is contained in:
Landon Wark
2023-11-09 09:55:51 -06:00
parent 5570d87b7c
commit bf4149b1b3
25 changed files with 1002 additions and 784 deletions

View File

@@ -1,5 +1,5 @@
'''Contains or imports all database convenience functions'''
from tools import Settings
from tools import Result, Report
from sqlalchemy import event
from sqlalchemy.engine import Engine
from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError
@@ -7,10 +7,7 @@ from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as S
import logging
import pandas as pd
import json
from pathlib import Path
from .models import *
# from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError
# from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError
import logging
from backend.validators.pydant import *
@@ -47,7 +44,7 @@ def submissions_to_df(submission_type:str|None=None, limit:int=0) -> pd.DataFram
# use lookup function to create list of dicts
# subs = [item.to_dict() for item in lookup_submissions(ctx=ctx, submission_type=submission_type, limit=limit)]
subs = [item.to_dict() for item in BasicSubmission.query(submission_type=submission_type, limit=limit)]
logger.debug(f"Got {len(subs)} results.")
logger.debug(f"Got {len(subs)} submissions.")
# make df from dicts (records) in list
df = pd.DataFrame.from_records(subs)
# Exclude sub information
@@ -111,79 +108,24 @@ def update_last_used(reagent:Reagent, kit:KitType):
Updates the 'last_used' field in kittypes/reagenttypes
Args:
ctx (Settings): settings object passed down from gui
reagent (models.Reagent): reagent to be used for update
kit (models.KitType): kit to be used for lookup
"""
# rt = list(set(reagent.type).intersection(kit.reagent_types))[0]
report = Report()
logger.debug(f"Attempting update of reagent type at intersection of ({reagent}), ({kit})")
# rt = lookup_reagent_types(ctx=ctx, kit_type=kit, reagent=reagent)
rt = ReagentType.query(kit_type=kit, reagent=reagent)
if rt != None:
# assoc = lookup_reagenttype_kittype_association(ctx=ctx, kit_type=kit, reagent_type=rt)
assoc = KitTypeReagentTypeAssociation.query(kit_type=kit, reagent_type=rt)
if assoc != None:
if assoc.last_used != reagent.lot:
logger.debug(f"Updating {assoc} last used to {reagent.lot}")
assoc.last_used = reagent.lot
# ctx.database_session.merge(assoc)
# ctx.database_session.commit()
# result = store_object(ctx=ctx, object=assoc)
result = assoc.save()
return result
return dict(message=f"Updating last used {rt} was not performed.")
result = assoc.save()
return(report.add_result(result))
return report.add_result(Result(msg=f"Updating last used {rt} was not performed.", status="Information"))
# def delete_submission(id:int) -> dict|None:
# """
# Deletes a submission and its associated samples from the database.
# Args:
# ctx (Settings): settings object passed down from gui
# id (int): id of submission to be deleted.
# """
# # In order to properly do this Im' going to have to delete all of the secondary table stuff as well.
# # Retrieve submission
# # sub = lookup_submissions(ctx=ctx, id=id)
# sub = models.BasicSubmission.query(id=id)
# # Convert to dict for storing backup as a yml
# sub.delete()
# return None
# def update_ww_sample(sample_obj:dict) -> dict|None:
# """
# Retrieves wastewater sample by rsl number (sample_obj['sample']) and updates values from constructed dictionary
# Args:
# ctx (Settings): settings object passed down from gui
# sample_obj (dict): dictionary representing new values for database object
# """
# logger.debug(f"dictionary to use for update: {pformat(sample_obj)}")
# logger.debug(f"Looking up {sample_obj['sample']} in plate {sample_obj['plate_rsl']}")
# # assoc = lookup_submission_sample_association(ctx=ctx, submission=sample_obj['plate_rsl'], sample=sample_obj['sample'])
# assoc = models.SubmissionSampleAssociation.query(submission=sample_obj['plate_rsl'], sample=sample_obj['sample'])
# if assoc != None:
# for key, value in sample_obj.items():
# # set attribute 'key' to 'value'
# try:
# check = getattr(assoc, key)
# except AttributeError as e:
# logger.error(f"Item doesn't have field {key} due to {e}")
# continue
# if check != value:
# logger.debug(f"Setting association key: {key} to {value}")
# try:
# setattr(assoc, key, value)
# except AttributeError as e:
# logger.error(f"Can't set field {key} to {value} due to {e}")
# continue
# else:
# logger.error(f"Unable to find sample {sample_obj['sample']}")
# return
# # result = store_object(ctx=ctx, object=assoc)
# result = assoc.save()
# return result
def check_kit_integrity(sub:BasicSubmission|KitType|PydSubmission, reagenttypes:list=[]) -> dict|None:
def check_kit_integrity(sub:BasicSubmission|KitType|PydSubmission, reagenttypes:list=[]) -> Tuple[list, Report]:
"""
Ensures all reagents expected in kit are listed in Submission
@@ -194,6 +136,7 @@ def check_kit_integrity(sub:BasicSubmission|KitType|PydSubmission, reagenttypes:
Returns:
dict|None: Result object containing a message and any missing components.
"""
report = Report()
logger.debug(type(sub))
# What type is sub?
# reagenttypes = []
@@ -238,8 +181,9 @@ def check_kit_integrity(sub:BasicSubmission|KitType|PydSubmission, reagenttypes:
if len(missing)==0:
result = None
else:
result = {'message' : f"The submission you are importing is missing some reagents expected by the kit.\n\nIt looks like you are missing: {[item.upper() for item in missing]}\n\nAlternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.\n\nPlease make sure you check the lots carefully!", 'missing': missing}
return result
result = Result(msg=f"The submission you are importing is missing some reagents expected by the kit.\n\nIt looks like you are missing: {[item.upper() for item in missing]}\n\nAlternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.\n\nPlease make sure you check the lots carefully!", status="Warning")
report.add_result(result)
return report
def update_subsampassoc_with_pcr(submission:BasicSubmission, sample:BasicSample, input_dict:dict) -> dict|None:
"""

View File

@@ -2,12 +2,12 @@
All kit and reagent related models
'''
from __future__ import annotations
from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT
from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT, func
from sqlalchemy.orm import relationship, validates, Query
from sqlalchemy.ext.associationproxy import association_proxy
from datetime import date
import logging
from tools import Settings, check_authorization, Base, setup_lookup, query_return
from tools import check_authorization, Base, setup_lookup, query_return, Report, Result
from typing import List
from . import Organization
@@ -322,10 +322,11 @@ class KitTypeReagentTypeAssociation(Base):
limit = 1
return query_return(query=query, limit=limit)
def save(self):
def save(self) -> Report:
report = Report()
self.metadata.session.add(self)
self.metadata.session.commit()
return None
return report
class Reagent(Base):
"""
@@ -564,6 +565,7 @@ class SubmissionType(Base):
@setup_lookup
def query(cls,
name:str|None=None,
key:str|None=None,
limit:int=0
) -> SubmissionType|List[SubmissionType]:
"""
@@ -585,7 +587,17 @@ class SubmissionType(Base):
limit = 1
case _:
pass
match key:
case str():
query = query.filter(cls.info_map.op('->')(key)!=None)
case _:
pass
return query_return(query=query, limit=limit)
def save(self):
    """Persist this SubmissionType: stage it on the session and commit at once.

    Returns:
        None
    """
    session = self.metadata.session
    session.add(self)
    session.commit()
    return None
class SubmissionTypeKitTypeAssociation(Base):
"""

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
from getpass import getuser
import math
from pprint import pformat
from . import Reagent, SubmissionType
from . import Reagent, SubmissionType, KitType, Organization
from sqlalchemy import Column, String, TIMESTAMP, INTEGER, ForeignKey, Table, JSON, FLOAT, case
from sqlalchemy.orm import relationship, validates, Query
import logging
@@ -21,9 +21,11 @@ from tools import check_not_nan, row_map, Base, query_return, setup_lookup
from datetime import datetime, date
from typing import List
from dateutil.parser import parse
from dateutil.parser._parser import ParserError
import yaml
from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError
from sqlalchemy.exc import OperationalError as AlcOperationalError, IntegrityError as AlcIntegrityError, StatementError
from sqlite3 import OperationalError as SQLOperationalError, IntegrityError as SQLIntegrityError
import sys
logger = logging.getLogger(f"submissions.{__name__}")
@@ -54,8 +56,8 @@ class BasicSubmission(Base):
pcr_info = Column(JSON) #: unstructured output from pcr table logger or user(Artic)
run_cost = Column(FLOAT(2)) #: total cost of running the plate. Set from constant and mutable kit costs at time of creation.
uploaded_by = Column(String(32)) #: user name of person who submitted the submission to the database.
comment = Column(JSON)
submission_category = Column(String(64))
comment = Column(JSON) #: user notes
submission_category = Column(String(64)) #: ["Research", "Diagnostic", "Surveillance"], else defaults to submission_type_name
submission_sample_associations = relationship(
"SubmissionSampleAssociation",
@@ -253,7 +255,7 @@ class BasicSubmission(Base):
Stupid stopgap solution to there being an issue with the Bacterial Culture plate map
Args:
xl (pd.ExcelFile): original xl workbook
xl (pd.ExcelFile): original xl workbook, used for child classes mostly
plate_map (pd.DataFrame): original plate map
Returns:
@@ -268,6 +270,7 @@ class BasicSubmission(Base):
Args:
input_dict (dict): Input sample dictionary
xl (pd.ExcelFile): original xl workbook, used for child classes mostly
Returns:
dict: Updated sample dictionary
@@ -289,6 +292,10 @@ class BasicSubmission(Base):
# logger.debug(f"Called {cls.__name__} sample parser")
return input_dict
@classmethod
def finalize_parse(cls, input_dict:dict, xl:pd.ExcelFile|None=None, info_map:dict|None=None, plate_map:dict|None=None) -> dict:
    """
    Hook for subclass-specific post-processing of the parsed submission.
    Base implementation is a deliberate no-op so subclasses (e.g. WastewaterArtic)
    can override it without every submission type needing one.

    Args:
        input_dict (dict): parsed submission dictionary
        xl (pd.ExcelFile | None, optional): original workbook, for subclasses. Defaults to None.
        info_map (dict | None, optional): submission-type info map, for subclasses. Defaults to None.
        plate_map (dict | None, optional): parsed plate map, for subclasses. Defaults to None.

    Returns:
        dict: the input dictionary, unchanged
    """
    return input_dict
@classmethod
def custom_autofill(cls, input_excel:Workbook) -> Workbook:
"""
@@ -315,9 +322,14 @@ class BasicSubmission(Base):
return regex
@classmethod
def find_subclasses(cls, attrs:dict|None=None, submission_type:str|None=None):
if submission_type != None:
return cls.find_polymorphic_subclass(submission_type)
def find_subclasses(cls, attrs:dict|None=None, submission_type:str|SubmissionType|None=None):
match submission_type:
case str():
return cls.find_polymorphic_subclass(submission_type)
case SubmissionType():
return cls.find_polymorphic_subclass(submission_type.name)
case _:
pass
if len(attrs) == 0 or attrs == None:
return cls
if any([not hasattr(cls, attr) for attr in attrs]):
@@ -361,7 +373,7 @@ class BasicSubmission(Base):
yaml.dump(backup, f)
except KeyError:
pass
self.metadata.database_session.delete(self)
self.metadata.session.delete(self)
try:
self.metadata.session.commit()
except (SQLIntegrityError, SQLOperationalError, AlcIntegrityError, AlcOperationalError) as e:
@@ -396,6 +408,7 @@ class BasicSubmission(Base):
Returns:
models.BasicSubmission | List[models.BasicSubmission]: Submission(s) of interest
"""
logger.debug(kwargs)
# NOTE: if you go back to using 'model' change the appropriate cls to model in the query filters
if submission_type == None:
model = cls.find_subclasses(attrs=kwargs)
@@ -404,19 +417,7 @@ class BasicSubmission(Base):
model = cls.find_subclasses(submission_type=submission_type.name)
else:
model = cls.find_subclasses(submission_type=submission_type)
# query: Query = setup_lookup(ctx=ctx, locals=locals()).query(model)
query: Query = cls.metadata.session.query(model)
# by submission type
# match submission_type:
# case SubmissionType():
# logger.debug(f"Looking up BasicSubmission with submission type: {submission_type}")
# query = query.filter(model.submission_type_name==submission_type.name)
# case str():
# logger.debug(f"Looking up BasicSubmission with submission type: {submission_type}")
# query = query.filter(model.submission_type_name==submission_type)
# case _:
# pass
# by date range
if start_date != None and end_date == None:
logger.warning(f"Start date with no end date, using today.")
end_date = date.today()
@@ -482,10 +483,94 @@ class BasicSubmission(Base):
query.order_by(cls.submitted_date)
return query_return(query=query, limit=limit)
@classmethod
def query_or_create(cls, submission_type:str|SubmissionType|None=None, **kwargs) -> BasicSubmission:
"""
Returns object from db if exists, else, creates new. Due to need for user input, doesn't see much use ATM.
Args:
submission_type (str | SubmissionType | None, optional): Submission type to be created. Defaults to None.
Raises:
ValueError: _description_
ValueError: _description_
Returns:
cls: _description_
"""
code = 0
msg = None
disallowed = ["id"]
if kwargs == {}:
raise ValueError("Need to narrow down query or the first available instance will be returned.")
for key in kwargs.keys():
if key in disallowed:
raise ValueError(f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects. Use .query() instead.")
instance = cls.query(submission_type=submission_type, limit=1, **kwargs)
logger.debug(f"Retrieved instance: {instance}")
if instance == None:
used_class = cls.find_subclasses(attrs=kwargs, submission_type=submission_type)
instance = used_class(**kwargs)
match submission_type:
case str():
submission_type = SubmissionType.query(name=submission_type)
case _:
pass
instance.submission_type = submission_type
instance.submission_type_name = submission_type.name
if "submitted_date" not in kwargs.keys():
instance.submitted_date = date.today()
else:
code = 1
msg = "This submission already exists.\nWould you like to overwrite?"
return instance, code, msg
@classmethod
def filename_template(cls):
    """Return the default Jinja-style template used when building export filenames."""
    template = "{{ rsl_plate_num }}"
    return template
def set_attribute(self, key, value):
    """
    Resolve *value* to the appropriate database object (or derived value) for
    *key*, then set it on this submission. Unknown keys are set verbatim;
    parser-only keys ("ctx", "csv", "filepath") are ignored.

    Args:
        key: attribute name coming from the parsed submission dict
        value: raw value to convert/assign
    """
    if key == "extraction_kit":
        logger.debug(f"Looking up kit {value}")
        field_value = KitType.query(name=value)
        logger.debug(f"Got {field_value} for kit {value}")
    elif key == "submitting_lab":
        logger.debug(f"Looking up organization: {value}")
        field_value = Organization.query(name=value)
        logger.debug(f"Got {field_value} for organization {value}")
    elif key == "submitter_plate_num":
        logger.debug(f"Submitter plate id: {value}")
        field_value = value
    elif key == "samples":
        # Samples attach themselves via toSQL(submission=self); nothing to set here.
        for sample in value:
            sample, _ = sample.toSQL(submission=self)
        return
    elif key == "reagents":
        field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for reagent in value]
    elif key == "submission_type":
        field_value = SubmissionType.query(name=value)
    elif key == "sample_count":
        # Fall back to counting attached samples when no explicit count was parsed.
        field_value = len(self.samples) if value is None else value
    elif key in ("ctx", "csv", "filepath"):
        return
    else:
        field_value = value
    # insert into field
    try:
        setattr(self, key, field_value)
    except AttributeError:
        logger.error(f"Could not set {self} attribute {key} to {value}")
# Below are the custom submission types
class BacterialCulture(BasicSubmission):
@@ -759,7 +844,7 @@ class WastewaterArtic(BasicSubmission):
input_dict['sample_type'] = "Wastewater Sample"
# Because generate_sample_object needs the submitter_id and the artic has the "({origin well})"
# at the end, this has to be done here. No moving to sqlalchemy object :(
input_dict['submitter_id'] = re.sub(r"\s\(.+\)$", "", str(input_dict['submitter_id'])).strip()
input_dict['submitter_id'] = re.sub(r"\s\(.+\)\s?$", "", str(input_dict['submitter_id'])).strip()
return input_dict
@classmethod
@@ -782,6 +867,53 @@ class WastewaterArtic(BasicSubmission):
def get_regex(cls):
return "(?P<Wastewater_Artic>(\\d{4}-\\d{2}-\\d{2}(?:-|_)(?:\\d_)?artic)|(RSL(?:-|_)?AR(?:-|_)?20\\d{2}-?\\d{2}-?\\d{2}(?:(_|-)\\d?(\\D|$)R?\\d?)?))"
@classmethod
def finalize_parse(cls, input_dict: dict, xl: pd.ExcelFile | None = None, info_map: dict | None = None, plate_map: dict | None = None) -> dict:
input_dict = super().finalize_parse(input_dict, xl, info_map, plate_map)
logger.debug(pformat(input_dict))
logger.debug(pformat(info_map))
logger.debug(pformat(plate_map))
samples = []
for sample in input_dict['samples']:
if sample.submitter_id == "NTC1":
samples.append(dict(sample=sample.submitter_id, destination_row=8, destination_column=2, source_row=0, source_column=0, plate_number='control', plate=None))
continue
elif sample.submitter_id == "NTC2":
samples.append(dict(sample=sample.submitter_id, destination_row=8, destination_column=5, source_row=0, source_column=0, plate_number='control', plate=None))
continue
destination_row = sample.row[0]
destination_column = sample.column[0]
logger.debug(f"Looking up: {sample.submitter_id} friend.")
lookup_sample = BasicSample.query(submitter_id=sample.submitter_id)
lookup_ssa = SubmissionSampleAssociation.query(sample=lookup_sample, exclude_submission_type=cls.__mapper_args__['polymorphic_identity'] , chronologic=True, reverse=True, limit=1)
try:
plate = lookup_ssa.submission.rsl_plate_num
source_row = lookup_ssa.row
source_column = lookup_ssa.column
except AttributeError:
plate = ""
source_row = 0
source_column = 0
samples.append(dict(
sample=sample.submitter_id,
destination_column=destination_column,
destination_row=destination_row,
plate=plate,
source_column=source_column,
source_row = source_row
))
plates = sorted(list(set([sample['plate'] for sample in samples if sample['plate'] != None])))
for iii, plate in enumerate(plates):
for sample in samples:
if sample['plate'] == plate:
sample['plate_number'] = iii + 1
df = pd.DataFrame.from_records(samples).fillna(value="")
df.source_row = df.source_row.astype(int)
df.source_column = df.source_column.astype(int)
df.sort_values(by=['destination_column', 'destination_row'], inplace=True)
input_dict['csv'] = df
return input_dict
class BasicSample(Base):
"""
Base of basic sample which polymorphs into BCSample and WWSample
@@ -870,16 +1002,21 @@ class BasicSample(Base):
return dict(name=self.submitter_id[:10], positive=False, tooltip=tooltip_text)
@classmethod
def find_subclasses(cls, attrs:dict|None=None, rsl_number:str|None=None):
def find_subclasses(cls, attrs:dict|None=None, sample_type:str|None=None):
    """
    Retrieves the sample model (sub)class to use for a query.

    Args:
        attrs (dict | None, optional): attributes the class must have. Defaults to None.
        sample_type (str | None, optional): polymorphic identity to resolve directly. Defaults to None.

    Raises:
        AttributeError: Raised if no subclass of cls has all requested attributes.

    Returns:
        type: cls itself, or the first subclass having every attribute in attrs.
    """
    if sample_type != None:
        return cls.find_polymorphic_subclass(polymorphic_identity=sample_type)
    # BUG FIX: test for None before len() — the original checked len(attrs)
    # first, which raised TypeError whenever attrs was None (its default).
    if attrs is None or len(attrs) == 0:
        logger.debug(f"No attr, returning {cls}")
        return cls
    if any([not hasattr(cls, attr) for attr in attrs]):
        logger.debug(f"{cls} is missing attrs. searching for better match.")
        # looks for first model that has all included kwargs
        try:
            model = [subclass for subclass in cls.__subclasses__() if all([hasattr(subclass, attr) for attr in attrs])][0]
        except IndexError as e:
            raise AttributeError(f"Couldn't find existing class/subclass of {cls} with all attributes:\n{pformat(attrs)}")
    else:
        logger.debug(f"{cls} has all necessary attributes, returning")
        return cls
    logger.debug(f"Using model: {model}")
    return model
@@ -906,7 +1043,7 @@ class BasicSample(Base):
@setup_lookup
def query(cls,
submitter_id:str|None=None,
# sample_type:str|None=None,
sample_type:str|None=None,
limit:int=0,
**kwargs
) -> BasicSample|List[BasicSample]:
@@ -922,14 +1059,18 @@ class BasicSample(Base):
Returns:
models.BasicSample|List[models.BasicSample]: Sample(s) of interest.
"""
if sample_type == None:
model = cls.find_subclasses(attrs=kwargs)
else:
model = cls.find_subclasses(sample_type=sample_type)
logger.debug(f"Length of kwargs: {len(kwargs)}")
# model = models.BasicSample.find_subclasses(ctx=ctx, attrs=kwargs)
# query: Query = setup_lookup(ctx=ctx, locals=locals()).query(model)
query: Query = cls.metadata.session.query(cls)
query: Query = cls.metadata.session.query(model)
match submitter_id:
case str():
logger.debug(f"Looking up {cls} with submitter id: {submitter_id}")
query = query.filter(cls.submitter_id==submitter_id)
logger.debug(f"Looking up {model} with submitter id: {submitter_id}")
query = query.filter(model.submitter_id==submitter_id)
limit = 1
case _:
pass
@@ -940,12 +1081,28 @@ class BasicSample(Base):
# case _:
# pass
for k, v in kwargs.items():
attr = getattr(cls, k)
attr = getattr(model, k)
logger.debug(f"Got attr: {attr}")
query = query.filter(attr==v)
if len(kwargs) > 0:
limit = 1
return query_return(query=query, limit=limit)
@classmethod
def query_or_create(cls, sample_type:str, **kwargs):
    """
    Returns sample from db if it exists, else creates a new one of the given type.

    Args:
        sample_type (str): polymorphic identity of the sample subclass.

    Raises:
        ValueError: Raised if no kwargs are given to narrow down the query.
        ValueError: Raised if a disallowed kwarg (currently only 'id') is given.

    Returns:
        BasicSample: existing or newly constructed (unsaved) sample instance.
    """
    disallowed = ["id"]
    if not kwargs:
        raise ValueError("Need to narrow down query or the first available instance will be returned.")
    for key in kwargs:
        if key in disallowed:
            raise ValueError(f"{key} is not allowed as a query argument as it could lead to creation of duplicate objects.")
    instance = cls.query(sample_type=sample_type, limit=1, **kwargs)
    logger.debug(f"Retrieved instance: {instance}")
    if instance is None:
        used_class = cls.find_subclasses(attrs=kwargs, sample_type=sample_type)
        instance = used_class(**kwargs)
        instance.sample_type = sample_type
    return instance
class WastewaterSample(BasicSample):
"""
@@ -996,6 +1153,20 @@ class WastewaterSample(BasicSample):
output_dict['rsl_number'] = output_dict['submitter_id']
if output_dict['ww_full_sample_id'] != None:
output_dict["submitter_id"] = output_dict['ww_full_sample_id']
# Ad hoc repair method for WW (or possibly upstream) not formatting some dates properly.
match output_dict['collection_date']:
case str():
try:
output_dict['collection_date'] = parse(output_dict['collection_date']).date()
except ParserError:
logger.error(f"Problem parsing collection_date: {output_dict['collection_date']}")
output_dict['collection_date'] = date(1,1,1)
case datetime():
output_dict['collection_date'] = output_dict['collection_date'].date()
case date():
pass
case _:
del output_dict['collection_date']
return output_dict
class BacterialCultureSample(BasicSample):
@@ -1070,11 +1241,13 @@ class SubmissionSampleAssociation(Base):
@setup_lookup
def query(cls,
submission:BasicSubmission|str|None=None,
exclude_submission_type:str|None=None,
sample:BasicSample|str|None=None,
row:int=0,
column:int=0,
limit:int=0,
chronologic:bool=False
chronologic:bool=False,
reverse:bool=False,
) -> SubmissionSampleAssociation|List[SubmissionSampleAssociation]:
"""
Lookup junction of Submission and Sample in the database
@@ -1109,12 +1282,64 @@ class SubmissionSampleAssociation(Base):
query = query.filter(cls.row==row)
if column > 0:
query = query.filter(cls.column==column)
logger.debug(f"Query count: {query.count()}")
match exclude_submission_type:
case str():
query = query.join(BasicSubmission).filter(BasicSubmission.submission_type_name != exclude_submission_type)
case _:
pass
# logger.debug(f"Query count: {query.count()}")
if reverse and not chronologic:
query = query.order_by(BasicSubmission.id.desc())
# query = query.join(BasicSubmission).order_by(BasicSubmission.id.desc())
# query.join(BasicSubmission).order_by(cls.submission.id.desc())
if chronologic:
query.join(BasicSubmission).order_by(BasicSubmission.submitted_date)
if query.count() == 1:
limit = 1
if reverse:
query = query.order_by(BasicSubmission.submitted_date.desc())
# query = query.join(BasicSubmission).order_by(BasicSubmission.submitted_date.desc())
# query.join(BasicSubmission).order_by(cls.submission.submitted_date.desc())
else:
query = query.order_by(BasicSubmission.submitted_date)
# query.join(BasicSubmission).order_by(cls.submission.submitted_date)
# if query.count() == 1:
# limit = 1
return query_return(query=query, limit=limit)
@classmethod
def query_or_create(cls,
association_type:str="Basic Association",
submission:BasicSubmission|str|None=None,
sample:BasicSample|str|None=None,
**kwargs):
match submission:
case BasicSubmission():
pass
case str():
submission = BasicSubmission.query(rsl_number=submission)
case _:
raise ValueError()
match sample:
case BasicSample():
pass
case str():
sample = BasicSample.query(submitter_id=sample)
case _:
raise ValueError()
try:
row = kwargs['row']
except KeyError:
row = None
try:
column = kwargs['column']
except KeyError:
column = None
try:
instance = cls.query(submission=submission, sample=sample, row=row, column=column, limit=1)
except StatementError:
instance = None
if instance == None:
used_cls = cls.find_polymorphic_subclass(polymorphic_identity=association_type)
instance = used_cls(submission=submission, sample=sample, **kwargs)
return instance
def save(self):
self.metadata.session.add(self)

View File

@@ -7,8 +7,7 @@ from typing import List
import pandas as pd
import numpy as np
from pathlib import Path
from backend.db import models
from backend.db.functions import lookup_kit_types, lookup_submission_type, lookup_samples
from backend.db.models import *
from backend.validators import PydSubmission, PydReagent, RSLNamer, PydSample
import logging
from collections import OrderedDict
@@ -49,19 +48,22 @@ class SheetParser(object):
raise FileNotFoundError(f"Couldn't parse file {self.filepath}")
self.sub = OrderedDict()
# make decision about type of sample we have
self.sub['submission_type'] = dict(value=RSLNamer.retrieve_submission_type(ctx=self.ctx, instr=self.filepath), missing=True)
self.sub['submission_type'] = dict(value=RSLNamer.retrieve_submission_type(instr=self.filepath), missing=True)
# # grab the info map from the submission type in database
self.parse_info()
self.import_kit_validation_check()
self.parse_reagents()
self.import_reagent_validation_check()
self.parse_samples()
self.finalize_parse()
def parse_info(self):
"""
Pulls basic information from the excel sheet
"""
info = InfoParser(ctx=self.ctx, xl=self.xl, submission_type=self.sub['submission_type']['value']).parse_info()
parser = InfoParser(xl=self.xl, submission_type=self.sub['submission_type']['value'])
info = parser.parse_info()
self.info_map = parser.map
for k,v in info.items():
match k:
case "sample":
@@ -77,13 +79,15 @@ class SheetParser(object):
if extraction_kit == None:
extraction_kit = extraction_kit=self.sub['extraction_kit']
logger.debug(f"Parsing reagents for {extraction_kit}")
self.sub['reagents'] = ReagentParser(ctx=self.ctx, xl=self.xl, submission_type=self.sub['submission_type'], extraction_kit=extraction_kit).parse_reagents()
self.sub['reagents'] = ReagentParser(xl=self.xl, submission_type=self.sub['submission_type'], extraction_kit=extraction_kit).parse_reagents()
def parse_samples(self):
"""
Pulls sample info from the excel sheet
"""
self.sample_result, self.sub['samples'] = SampleParser(ctx=self.ctx, xl=self.xl, submission_type=self.sub['submission_type']['value']).parse_samples()
parser = SampleParser(xl=self.xl, submission_type=self.sub['submission_type']['value'])
self.sample_result, self.sub['samples'] = parser.parse_samples()
self.plate_map = parser.plate_map
def import_kit_validation_check(self):
"""
@@ -97,7 +101,7 @@ class SheetParser(object):
List[PydReagent]: List of reagents
"""
if not check_not_nan(self.sub['extraction_kit']['value']):
dlg = KitSelector(ctx=self.ctx, title="Kit Needed", message="At minimum a kit is needed. Please select one.")
dlg = KitSelector(title="Kit Needed", message="At minimum a kit is needed. Please select one.")
if dlg.exec():
self.sub['extraction_kit'] = dict(value=dlg.getValues(), missing=True)
else:
@@ -111,11 +115,16 @@ class SheetParser(object):
Enforce that only allowed reagents get into the Pydantic Model
"""
# kit = lookup_kit_types(ctx=self.ctx, name=self.sub['extraction_kit']['value'])
kit = models.KitType.query(name=self.sub['extraction_kit']['value'])
kit = KitType.query(name=self.sub['extraction_kit']['value'])
allowed_reagents = [item.name for item in kit.get_reagents()]
logger.debug(f"List of reagents for comparison with allowed_reagents: {pprint.pformat(self.sub['reagents'])}")
# self.sub['reagents'] = [reagent for reagent in self.sub['reagents'] if reagent['value'].type in allowed_reagents]
self.sub['reagents'] = [reagent for reagent in self.sub['reagents'] if reagent.type in allowed_reagents]
def finalize_parse(self):
    """
    Run the submission-type-specific final pass over the parsed submission dict.
    """
    # BUG FIX: at this point self.sub['submission_type'] is a
    # dict(value=..., missing=...) wrapper, while every other call to
    # find_polymorphic_subclass in the parsers passes the bare 'value' string —
    # pass the string so the polymorphic lookup matches.
    finisher = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.sub['submission_type']['value']).finalize_parse
    self.sub = finisher(input_dict=self.sub, xl=self.xl, info_map=self.info_map, plate_map=self.plate_map)
def to_pydantic(self) -> PydSubmission:
"""
@@ -125,15 +134,15 @@ class SheetParser(object):
PydSubmission: output pydantic model
"""
logger.debug(f"Submission dictionary coming into 'to_pydantic':\n{pprint.pformat(self.sub)}")
psm = PydSubmission(ctx=self.ctx, filepath=self.filepath, **self.sub)
psm = PydSubmission(filepath=self.filepath, **self.sub)
# delattr(psm, "filepath")
return psm
class InfoParser(object):
def __init__(self, ctx:Settings, xl:pd.ExcelFile, submission_type:str):
def __init__(self, xl:pd.ExcelFile, submission_type:str):
    """
    Object that pulls submission-level info from the excel workbook.

    Args:
        xl (pd.ExcelFile): the submission workbook being parsed
        submission_type (str): name of the submission type, used to fetch its info map
    """
    logger.debug(f"\n\nHello from InfoParser!")
    # self.ctx = ctx
    # Map of which sheets/cells hold each info field for this submission type.
    self.map = self.fetch_submission_info_map(submission_type=submission_type)
    self.xl = xl
    logger.debug(f"Info map for InfoParser: {pprint.pformat(self.map)}")
@@ -152,11 +161,10 @@ class InfoParser(object):
if isinstance(submission_type, str):
submission_type = dict(value=submission_type, missing=True)
logger.debug(f"Looking up submission type: {submission_type['value']}")
# submission_type = lookup_submission_type(ctx=self.ctx, name=submission_type['value'])
submission_type = models.SubmissionType.query(name=submission_type['value'])
submission_type = SubmissionType.query(name=submission_type['value'])
info_map = submission_type.info_map
# Get the parse_info method from the submission type specified
self.custom_parser = models.BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name).parse_info
self.custom_parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name).parse_info
return info_map
def parse_info(self) -> dict:
@@ -174,7 +182,7 @@ class InfoParser(object):
if isinstance(v, str):
dicto[k] = dict(value=v, missing=False)
continue
if k == "samples":
if k in ["samples", "all_sheets"]:
continue
if sheet in self.map[k]['sheets']:
relevant[k] = v
@@ -205,9 +213,9 @@ class InfoParser(object):
class ReagentParser(object):
def __init__(self, ctx:Settings, xl:pd.ExcelFile, submission_type:str, extraction_kit:str):
def __init__(self, xl:pd.ExcelFile, submission_type:str, extraction_kit:str):
logger.debug("\n\nHello from ReagentParser!\n\n")
self.ctx = ctx
# self.ctx = ctx
self.map = self.fetch_kit_info_map(extraction_kit=extraction_kit, submission_type=submission_type)
self.xl = xl
@@ -215,7 +223,7 @@ class ReagentParser(object):
if isinstance(extraction_kit, dict):
extraction_kit = extraction_kit['value']
# kit = lookup_kit_types(ctx=self.ctx, name=extraction_kit)
kit = models.KitType.query(name=extraction_kit)
kit = KitType.query(name=extraction_kit)
if isinstance(submission_type, dict):
submission_type = submission_type['value']
reagent_map = kit.construct_xl_map_for_use(submission_type.title())
@@ -238,7 +246,7 @@ class ReagentParser(object):
lot = df.iat[relevant[item]['lot']['row']-1, relevant[item]['lot']['column']-1]
expiry = df.iat[relevant[item]['expiry']['row']-1, relevant[item]['expiry']['column']-1]
except (KeyError, IndexError):
listo.append(PydReagent(ctx=self.ctx, type=item.strip(), lot=None, expiry=None, name=None, missing=True))
listo.append(PydReagent(type=item.strip(), lot=None, expiry=None, name=None, missing=True))
continue
# If the cell is blank tell the PydReagent
if check_not_nan(lot):
@@ -248,7 +256,7 @@ class ReagentParser(object):
# logger.debug(f"Got lot for {item}-{name}: {lot} as {type(lot)}")
lot = str(lot)
logger.debug(f"Going into pydantic: name: {name}, lot: {lot}, expiry: {expiry}, type: {item.strip()}")
listo.append(PydReagent(ctx=self.ctx, type=item.strip(), lot=lot, expiry=expiry, name=name, missing=missing))
listo.append(PydReagent(type=item.strip(), lot=lot, expiry=expiry, name=name, missing=missing))
# logger.debug(f"Returning listo: {listo}")
return listo
@@ -257,7 +265,7 @@ class SampleParser(object):
object to pull data for samples in excel sheet and construct individual sample objects
"""
def __init__(self, ctx:Settings, xl:pd.ExcelFile, submission_type:str) -> None:
def __init__(self, xl:pd.ExcelFile, submission_type:str) -> None:
"""
convert sample sub-dataframe to dictionary of records
@@ -268,7 +276,7 @@ class SampleParser(object):
"""
logger.debug("\n\nHello from SampleParser!")
self.samples = []
self.ctx = ctx
# self.ctx = ctx
self.xl = xl
self.submission_type = submission_type
sample_info_map = self.fetch_sample_info_map(submission_type=submission_type)
@@ -293,12 +301,12 @@ class SampleParser(object):
"""
logger.debug(f"Looking up submission type: {submission_type}")
# submission_type = lookup_submission_type(ctx=self.ctx, name=submission_type)
submission_type = models.SubmissionType.query(name=submission_type)
submission_type = SubmissionType.query(name=submission_type)
logger.debug(f"info_map: {pprint.pformat(submission_type.info_map)}")
sample_info_map = submission_type.info_map['samples']
# self.custom_parser = get_polymorphic_subclass(models.BasicSubmission, submission_type.name).parse_samples
self.custom_sub_parser = models.BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name).parse_samples
self.custom_sample_parser = models.BasicSample.find_polymorphic_subclass(polymorphic_identity=f"{submission_type.name} Sample").parse_sample
self.custom_sub_parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name).parse_samples
self.custom_sample_parser = BasicSample.find_polymorphic_subclass(polymorphic_identity=f"{submission_type.name} Sample").parse_sample
return sample_info_map
def construct_plate_map(self, plate_map_location:dict) -> pd.DataFrame:
@@ -316,7 +324,7 @@ class SampleParser(object):
df = pd.DataFrame(df.values[1:], columns=df.iloc[0])
df = df.set_index(df.columns[0])
# custom_mapper = get_polymorphic_subclass(models.BasicSubmission, self.submission_type)
custom_mapper = models.BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
custom_mapper = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
df = custom_mapper.custom_platemap(self.xl, df)
logger.debug(f"Custom platemap:\n{df}")
return df
@@ -407,7 +415,7 @@ class SampleParser(object):
# logger.debug(f"Output sample dict: {sample}")
logger.debug(f"Final lookup_table: \n\n {self.lookup_table}")
def parse_samples(self, generate:bool=True) -> List[dict]|List[models.BasicSample]:
def parse_samples(self, generate:bool=True) -> List[dict]|List[BasicSample]:
"""
Parse merged platemap\lookup info into dicts/samples
@@ -445,36 +453,39 @@ class SampleParser(object):
new_samples.append(PydSample(**translated_dict))
return result, new_samples
def generate_sample_object(self, input_dict) -> models.BasicSample:
"""
Constructs sample object from dict
# def generate_sample_object(self, input_dict) -> BasicSample:
# """
# Constructs sample object from dict.
# NOTE: Depreciated due to using Pydantic object up until db saving.
Args:
input_dict (dict): sample information
# Args:
# input_dict (dict): sample information
Returns:
models.BasicSample: Sample object
"""
query = input_dict['sample_type'].replace(" ", "")
try:
database_obj = getattr(models, query)
except AttributeError as e:
logger.error(f"Could not find the model {query}. Using generic.")
database_obj = models.BasicSample
logger.debug(f"Searching database for {input_dict['submitter_id']}...")
# instance = lookup_samples(ctx=self.ctx, submitter_id=str(input_dict['submitter_id']))
instance = models.BasicSample.query(submitter_id=str(input_dict['submitter_id']))
if instance == None:
logger.debug(f"Couldn't find sample {input_dict['submitter_id']}. Creating new sample.")
instance = database_obj()
for k,v in input_dict.items():
try:
instance.set_attribute(k, v)
except Exception as e:
logger.error(f"Failed to set {k} due to {type(e).__name__}: {e}")
else:
logger.debug(f"Sample {instance.submitter_id} already exists, will run update.")
return dict(sample=instance, row=input_dict['row'], column=input_dict['column'])
# Returns:
# models.BasicSample: Sample object
# """
# database_obj = BasicSample.find_polymorphic_subclass(polymorphic_identity=input_dict['sample_type'])
# # query = input_dict['sample_type'].replace(" ", "")
# # try:
# # # database_obj = getattr(models, query)
# # except AttributeError as e:
# # logger.error(f"Could not find the model {query}. Using generic.")
# # database_obj = models.BasicSample
# logger.debug(f"Searching database for {input_dict['submitter_id']}...")
# # instance = lookup_samples(ctx=self.ctx, submitter_id=str(input_dict['submitter_id']))
# instance = BasicSample.query(submitter_id=str(input_dict['submitter_id']))
# if instance == None:
# logger.debug(f"Couldn't find sample {input_dict['submitter_id']}. Creating new sample.")
# instance = database_obj()
# for k,v in input_dict.items():
# try:
# instance.set_attribute(k, v)
# except Exception as e:
# logger.error(f"Failed to set {k} due to {type(e).__name__}: {e}")
# else:
# logger.debug(f"Sample {instance.submitter_id} already exists, will run update.")
# return dict(sample=instance, row=input_dict['row'], column=input_dict['column'])
def grab_plates(self) -> List[str]:
"""
@@ -487,7 +498,7 @@ class SampleParser(object):
for plate in self.plates:
df = self.xl.parse(plate['sheet'], header=None)
if isinstance(df.iat[plate['row']-1, plate['column']-1], str):
output = RSLNamer.retrieve_rsl_number(ctx=self.ctx, instr=df.iat[plate['row']-1, plate['column']-1])
output = RSLNamer.retrieve_rsl_number(instr=df.iat[plate['row']-1, plate['column']-1])
else:
continue
plates.append(output)
@@ -497,7 +508,7 @@ class PCRParser(object):
"""
Object to pull data from Design and Analysis PCR export file.
"""
def __init__(self, ctx:dict, filepath:Path|None = None) -> None:
def __init__(self, filepath:Path|None = None) -> None:
"""
Initializes object.
@@ -505,7 +516,7 @@ class PCRParser(object):
ctx (dict): settings passed down from gui.
filepath (Path | None, optional): file to parse. Defaults to None.
"""
self.ctx = ctx
# self.ctx = ctx
logger.debug(f"Parsing {filepath.__str__()}")
if filepath == None:
logger.error(f"No filepath given.")
@@ -521,11 +532,11 @@ class PCRParser(object):
return
# self.pcr = OrderedDict()
self.parse_general(sheet_name="Results")
namer = RSLNamer(ctx=self.ctx, instr=filepath.__str__())
namer = RSLNamer(instr=filepath.__str__())
self.plate_num = namer.parsed_name
self.submission_type = namer.submission_type
logger.debug(f"Set plate number to {self.plate_num} and type to {self.submission_type}")
parser = models.BasicSubmission.find_polymorphic_subclass(self.submission_type)
parser = BasicSubmission.find_polymorphic_subclass(self.submission_type)
self.samples = parser.parse_pcr(xl=self.xl, rsl_number=self.plate_num)
def parse_general(self, sheet_name:str):

View File

@@ -76,7 +76,7 @@ def make_report_html(df:DataFrame, start_date:date, end_date:date) -> str:
return html
def convert_data_list_to_df(ctx:dict, input:list[dict], subtype:str|None=None) -> DataFrame:
def convert_data_list_to_df(input:list[dict], subtype:str|None=None) -> DataFrame:
"""
Convert list of control records to dataframe
@@ -171,8 +171,8 @@ def check_date(df:DataFrame, item:dict, previous_dates:list) -> Tuple[DataFrame,
passed = False
else:
passed = True
logger.debug(f"\n\tCurrent date: {item['date']}\n\tPrevious dates:{previous_dates}")
logger.debug(f"DF: {type(df)}, previous_dates: {type(previous_dates)}")
# logger.debug(f"\n\tCurrent date: {item['date']}\n\tPrevious dates:{previous_dates}")
# logger.debug(f"DF: {type(df)}, previous_dates: {type(previous_dates)}")
# if run didn't lead to changed date, return values
if passed:
logger.debug(f"Date check passed, returning.")

View File

@@ -1,8 +1,7 @@
import logging, re
from pathlib import Path
from openpyxl import load_workbook
from backend.db.models import BasicSubmission
from tools import Settings
from backend.db import BasicSubmission, SubmissionType
logger = logging.getLogger(f"submissions.{__name__}")
@@ -10,14 +9,12 @@ logger = logging.getLogger(f"submissions.{__name__}")
class RSLNamer(object):
"""
Object that will enforce proper formatting on RSL plate names.
NOTE: Depreciated in favour of object based methods in 'submissions.py'
"""
def __init__(self, ctx, instr:str, sub_type:str|None=None):
self.ctx = ctx
def __init__(self, instr:str, sub_type:str|None=None):
self.submission_type = sub_type
if self.submission_type == None:
self.submission_type = self.retrieve_submission_type(ctx=self.ctx, instr=instr)
self.submission_type = self.retrieve_submission_type(instr=instr)
logger.debug(f"got submission type: {self.submission_type}")
if self.submission_type != None:
enforcer = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
@@ -25,25 +22,30 @@ class RSLNamer(object):
self.parsed_name = enforcer.enforce_name(instr=self.parsed_name)
@classmethod
def retrieve_submission_type(cls, ctx:Settings, instr:str|Path) -> str:
def retrieve_submission_type(cls, instr:str|Path) -> str:
match instr:
case Path():
logger.debug(f"Using path method.")
logger.debug(f"Using path method for {instr}.")
if instr.exists():
wb = load_workbook(instr)
try:
submission_type = [item.strip().title() for item in wb.properties.category.split(";")][0]
except AttributeError:
try:
for type in ctx.submission_types:
sts = {item.name:item.info_map['all_sheets'] for item in SubmissionType.query(key="all_sheets")}
for k,v in sts.items():
# This gets the *first* submission type that matches the sheet names in the workbook
if wb.sheetnames == ctx.submission_types[type]['excel_map']:
submission_type = type.title()
if wb.sheetnames == v:
submission_type = k.title()
break
except:
submission_type = cls.retrieve_submission_type(ctx=ctx, instr=instr.stem.__str__())
# On failure recurse using filename as string for string method
submission_type = cls.retrieve_submission_type(instr=instr.stem.__str__())
else:
submission_type = cls.retrieve_submission_type(instr=instr.stem.__str__())
case str():
regex = BasicSubmission.construct_regex()
logger.debug(f"Using string method.")
logger.debug(f"Using string method for {instr}.")
m = regex.search(instr)
try:
submission_type = m.lastgroup
@@ -51,9 +53,13 @@ class RSLNamer(object):
logger.critical("No RSL plate number found or submission type found!")
case _:
submission_type = None
if submission_type == None:
try:
check = submission_type == None
except UnboundLocalError:
check = True
if check:
from frontend.custom_widgets import SubmissionTypeSelector
dlg = SubmissionTypeSelector(ctx, title="Couldn't parse submission type.", message="Please select submission type from list below.")
dlg = SubmissionTypeSelector(title="Couldn't parse submission type.", message="Please select submission type from list below.")
if dlg.exec():
submission_type = dlg.parse_form()
submission_type = submission_type.replace("_", " ")

View File

@@ -6,14 +6,14 @@ from pydantic import BaseModel, field_validator, Field
from datetime import date, datetime, timedelta
from dateutil.parser import parse
from dateutil.parser._parser import ParserError
from typing import List, Any, Tuple
from typing import List, Any, Tuple, Literal
from . import RSLNamer
from pathlib import Path
import re
import logging
from tools import check_not_nan, convert_nans_to_nones, Settings, jinja_template_loading
from tools import check_not_nan, convert_nans_to_nones, jinja_template_loading
from backend.db.models import *
from sqlalchemy.exc import InvalidRequestError, StatementError
from sqlalchemy.exc import StatementError
from PyQt6.QtWidgets import QComboBox, QWidget
from pprint import pformat
from openpyxl import load_workbook
@@ -21,7 +21,6 @@ from openpyxl import load_workbook
logger = logging.getLogger(f"submissions.{__name__}")
class PydReagent(BaseModel):
ctx: Settings
lot: str|None
type: str|None
expiry: date|None
@@ -139,15 +138,17 @@ class PydSample(BaseModel, extra='allow'):
def int_to_str(cls, value):
return str(value)
def toSQL(self, ctx:Settings, submission):
def toSQL(self, submission=None):
result = None
self.__dict__.update(self.model_extra)
logger.debug(f"Here is the incoming sample dict: \n{self.__dict__}")
# instance = lookup_samples(ctx=ctx, submitter_id=self.submitter_id)
instance = BasicSample.query(submitter_id=self.submitter_id)
if instance == None:
logger.debug(f"Sample {self.submitter_id} doesn't exist yet. Looking up sample object with polymorphic identity: {self.sample_type}")
instance = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)()
# instance = BasicSample.query(submitter_id=self.submitter_id)
# if instance == None:
# logger.debug(f"Sample {self.submitter_id} doesn't exist yet. Looking up sample object with polymorphic identity: {self.sample_type}")
# instance = BasicSample.find_polymorphic_subclass(polymorphic_identity=self.sample_type)()
# instance = BasicSample.query_or_create(**{k:v for k,v in self.__dict__.items() if k not in ['row', 'column']})
instance = BasicSample.query_or_create(sample_type=self.sample_type, submitter_id=self.submitter_id)
for key, value in self.__dict__.items():
# logger.debug(f"Setting sample field {key} to {value}")
match key:
@@ -155,20 +156,26 @@ class PydSample(BaseModel, extra='allow'):
continue
case _:
instance.set_attribute(name=key, value=value)
for row, column in zip(self.row, self.column):
logger.debug(f"Looking up association with identity: ({submission.submission_type_name} Association)")
# association = lookup_submission_sample_association(ctx=ctx, submission=submission, row=row, column=column)
association = SubmissionSampleAssociation.query(submission=submission, row=row, column=column)
logger.debug(f"Returned association: {association}")
if association == None or association == []:
logger.debug(f"Looked up association at row {row}, column {column} didn't exist, creating new association.")
association = SubmissionSampleAssociation.find_polymorphic_subclass(polymorphic_identity=f"{submission.submission_type_name} Association")
association = association(submission=submission, sample=instance, row=row, column=column)
if submission != None:
assoc_type = self.sample_type.replace("Sample", "").strip()
for row, column in zip(self.row, self.column):
# logger.debug(f"Looking up association with identity: ({submission.submission_type_name} Association)")
logger.debug(f"Looking up association with identity: ({assoc_type} Association)")
# association = lookup_submission_sample_association(ctx=ctx, submission=submission, row=row, column=column)
# association = SubmissionSampleAssociation.query(submission=submission, row=row, column=column)
# logger.debug(f"Returned association: {association}")
# if association == None or association == []:
# logger.debug(f"Looked up association at row {row}, column {column} didn't exist, creating new association.")
# association = SubmissionSampleAssociation.find_polymorphic_subclass(polymorphic_identity=f"{submission.submission_type_name} Association")
# association = association(submission=submission, sample=instance, row=row, column=column)
association = SubmissionSampleAssociation.query_or_create(association_type=f"{assoc_type} Association",
submission=submission,
sample=instance,
row=row, column=column)
instance.sample_submission_associations.append(association)
return instance, result
class PydSubmission(BaseModel, extra='allow'):
ctx: Settings
filepath: Path
submission_type: dict|None
# For defaults
@@ -240,15 +247,16 @@ class PydSubmission(BaseModel, extra='allow'):
sub_type = values.data['submission_type']['value']
if check_not_nan(value['value']):
# if lookup_submissions(ctx=values.data['ctx'], rsl_number=value['value']) == None:
if BasicSubmission.query(rsl_number=value['value']) == None:
return dict(value=value['value'], missing=False)
else:
logger.warning(f"Submission number {value} already exists in DB, attempting salvage with filepath")
# output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
return dict(value=output, missing=True)
# if BasicSubmission.query(rsl_number=value['value']) == None:
# return dict(value=value['value'], missing=False)
# else:
# logger.warning(f"Submission number {value} already exists in DB, attempting salvage with filepath")
# # output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
# output = RSLNamer(instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
# return dict(value=output, missing=True)
return value
else:
output = RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
output = RSLNamer(instr=values.data['filepath'].__str__(), sub_type=sub_type).parsed_name
return dict(value=output, missing=True)
@field_validator("technician", mode="before")
@@ -298,10 +306,8 @@ class PydSubmission(BaseModel, extra='allow'):
if check_not_nan(value['value']):
value = value['value'].title()
return dict(value=value, missing=False)
# else:
# return dict(value="RSL Name not found.")
else:
return dict(value=RSLNamer(ctx=values.data['ctx'], instr=values.data['filepath'].__str__()).submission_type.title(), missing=True)
return dict(value=RSLNamer(instr=values.data['filepath'].__str__()).submission_type.title(), missing=True)
@field_validator("submission_category")
@classmethod
@@ -345,58 +351,15 @@ class PydSubmission(BaseModel, extra='allow'):
msg = None
status = None
self.__dict__.update(self.model_extra)
# instance = lookup_submissions(ctx=self.ctx, rsl_number=self.rsl_plate_num['value'])
instance = BasicSubmission.query(rsl_number=self.rsl_plate_num['value'])
if instance == None:
instance = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)()
else:
code = 1
msg = "This submission already exists.\nWould you like to overwrite?"
instance, code, msg = BasicSubmission.query_or_create(submission_type=self.submission_type['value'], rsl_plate_num=self.rsl_plate_num['value'])
self.handle_duplicate_samples()
logger.debug(f"Here's our list of duplicate removed samples: {self.samples}")
for key, value in self.__dict__.items():
if isinstance(value, dict):
value = value['value']
logger.debug(f"Setting {key} to {value}")
# set fields based on keys in dictionary
match key:
case "extraction_kit":
logger.debug(f"Looking up kit {value}")
# field_value = lookup_kit_types(ctx=self.ctx, name=value)
field_value = KitType.query(name=value)
logger.debug(f"Got {field_value} for kit {value}")
case "submitting_lab":
logger.debug(f"Looking up organization: {value}")
# field_value = lookup_organizations(ctx=self.ctx, name=value)
field_value = Organization.query(name=value)
logger.debug(f"Got {field_value} for organization {value}")
case "submitter_plate_num":
logger.debug(f"Submitter plate id: {value}")
field_value = value
case "samples":
# instance = construct_samples(ctx=ctx, instance=instance, samples=value)
for sample in value:
# logger.debug(f"Parsing {sample} to sql.")
sample, _ = sample.toSQL(ctx=self.ctx, submission=instance)
# instance.samples.append(sample)
continue
case "reagents":
field_value = [reagent['value'].toSQL()[0] if isinstance(reagent, dict) else reagent.toSQL()[0] for reagent in value]
case "submission_type":
# field_value = lookup_submission_type(ctx=self.ctx, name=value)
field_value = SubmissionType.query(name=value)
case "sample_count":
if value == None:
field_value = len(self.samples)
else:
field_value = value
case "ctx" | "csv" | "filepath":
continue
case _:
field_value = value
# insert into field
try:
setattr(instance, key, field_value)
instance.set_attribute(key=key, value=value)
except AttributeError as e:
logger.debug(f"Could not set attribute: {key} to {value} due to: \n\n {e}")
continue
@@ -412,7 +375,6 @@ class PydSubmission(BaseModel, extra='allow'):
# Apply any discounts that are applicable for client and kit.
try:
logger.debug("Checking and applying discounts...")
# discounts = [item.amount for item in lookup_discounts(ctx=self.ctx, kit_type=instance.extraction_kit, organization=instance.submitting_lab)]
discounts = [item.amount for item in Discount.query(kit_type=instance.extraction_kit, organization=instance.submitting_lab)]
logger.debug(f"We got discounts: {discounts}")
if len(discounts) > 0:
@@ -513,7 +475,9 @@ class PydSubmission(BaseModel, extra='allow'):
template = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type).filename_template()
logger.debug(f"Using template string: {template}")
template = env.from_string(template)
return template.render(**self.improved_dict(dictionaries=False))
render = template.render(**self.improved_dict(dictionaries=False)).replace("/", "")
logger.debug(f"Template rendered as: {render}")
return render
class PydContact(BaseModel):
@@ -521,7 +485,7 @@ class PydContact(BaseModel):
phone: str|None
email: str|None
def toSQL(self, ctx):
def toSQL(self):
return Contact(name=self.name, phone=self.phone, email=self.email)
class PydOrganization(BaseModel):
@@ -530,12 +494,12 @@ class PydOrganization(BaseModel):
cost_centre: str
contacts: List[PydContact]|None
def toSQL(self, ctx):
def toSQL(self):
instance = Organization()
for field in self.model_fields:
match field:
case "contacts":
value = [item.toSQL(ctx) for item in getattr(self, field)]
value = [item.toSQL() for item in getattr(self, field)]
case _:
value = getattr(self, field)
instance.set_attribute(name=field, value=value)
@@ -555,7 +519,7 @@ class PydReagentType(BaseModel):
return timedelta(days=value)
return value
def toSQL(self, ctx:Settings, kit:KitType):
def toSQL(self, kit:KitType):
# instance: ReagentType = lookup_reagent_types(ctx=ctx, name=self.name)
instance: ReagentType = ReagentType.query(name=self.name)
if instance == None:
@@ -576,14 +540,14 @@ class PydKit(BaseModel):
name: str
reagent_types: List[PydReagentType] = []
def toSQL(self, ctx):
def toSQL(self):
result = dict(message=None, status='Information')
# instance = lookup_kit_types(ctx=ctx, name=self.name)
instance = KitType.query(name=self.name)
if instance == None:
instance = KitType(name=self.name)
# instance.reagent_types = [item.toSQL(ctx, instance) for item in self.reagent_types]
[item.toSQL(ctx, instance) for item in self.reagent_types]
[item.toSQL(instance) for item in self.reagent_types]
return instance, result